diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1b15dcf882a..4490890d001 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -23,7 +23,7 @@ // "forwardPorts": [], // Uncomment the next line to run commands after the container is created. - // "postCreateCommand": "make install-python-ci-dependencies-uv-venv" + // "postCreateCommand": "make install-python-dependencies-dev" // Configure tool-specific properties. // "customizations": {}, diff --git a/.github/actions/get-semantic-release-version/action.yml b/.github/actions/get-semantic-release-version/action.yml new file mode 100644 index 00000000000..89f6a8f81c1 --- /dev/null +++ b/.github/actions/get-semantic-release-version/action.yml @@ -0,0 +1,87 @@ +name: Get semantic release version +description: "" +inputs: + custom_version: # Optional input for a custom version + description: "Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing" + required: false + token: + description: "Personal Access Token" + required: true + default: "" +outputs: + release_version: + description: "The release version to use (e.g., v1.2.3)" + value: ${{ steps.get_release_version.outputs.release_version }} + version_without_prefix: + description: "The release version to use without 'v' (e.g., 1.2.3)" + value: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} + highest_semver_tag: + description: "The highest semantic version tag without the 'v' prefix (e.g., 1.2.3)" + value: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} +runs: + using: composite + steps: + - name: Get release version + id: get_release_version + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.token }} + GIT_AUTHOR_NAME: feast-ci-bot + GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co + GIT_COMMITTER_NAME: feast-ci-bot + GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co + run: | + if [[ -n "${{ inputs.custom_version }}" ]]; then + 
VERSION_REGEX="^v[0-9]+\.[0-9]+\.[0-9]+$"
+          echo "Using custom version: ${{ inputs.custom_version }}"
+          if [[ ! "${{ inputs.custom_version }}" =~ $VERSION_REGEX ]]; then
+            echo "Error: custom_version must match semantic versioning (e.g., v1.2.3)."
+            exit 1
+          fi
+          echo "release_version=${{ inputs.custom_version }}" >> "$GITHUB_OUTPUT"
+        elif [[ "${GITHUB_REF}" == refs/tags/* ]]; then
+          echo "Using tag reference: ${GITHUB_REF#refs/tags/}"
+          echo "release_version=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"
+        else
+          echo "Defaulting to branch name: ${GITHUB_REF#refs/heads/}"
+          echo "release_version=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
+        fi
+    - name: Get release version without prefix
+      id: get_release_version_without_prefix
+      shell: bash
+      env:
+        RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }}
+      run: |
+        if [[ "${RELEASE_VERSION}" == v* ]]; then
+          echo "version_without_prefix=${RELEASE_VERSION:1}" >> "$GITHUB_OUTPUT"
+        else
+          echo "version_without_prefix=${RELEASE_VERSION}" >> "$GITHUB_OUTPUT"
+        fi
+    - name: Get highest semver
+      id: get_highest_semver
+      shell: bash
+      env:
+        RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }}
+      run: |
+        if [[ -n "${{ inputs.custom_version }}" ]]; then
+          HIGHEST_SEMVER_TAG="${{ inputs.custom_version }}"
+          echo "highest_semver_tag=$HIGHEST_SEMVER_TAG" >> "$GITHUB_OUTPUT"
+          echo "Using custom version as highest semantic version: $HIGHEST_SEMVER_TAG"
+        else
+          source infra/scripts/setup-common-functions.sh
+          SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$'
+          if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then
+            HIGHEST_SEMVER_TAG=$(get_tag_release -m); echo "highest_semver_tag=$HIGHEST_SEMVER_TAG" >> "$GITHUB_OUTPUT"
+            echo "Using infra/scripts/setup-common-functions.sh to generate highest semantic version: $HIGHEST_SEMVER_TAG"
+          fi
+        fi
+    - name: Check output
+      shell: bash
+      env:
+        RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }}
+        VERSION_WITHOUT_PREFIX: 
${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} + HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} + run: | + echo $RELEASE_VERSION + echo $VERSION_WITHOUT_PREFIX + echo $HIGHEST_SEMVER_TAG \ No newline at end of file diff --git a/.github/fork_workflows/fork_pr_integration_tests_aws.yml b/.github/fork_workflows/fork_pr_integration_tests_aws.yml index 6eb8b8feff0..d0257ecaca9 100644 --- a/.github/fork_workflows/fork_pr_integration_tests_aws.yml +++ b/.github/fork_workflows/fork_pr_integration_tests_aws.yml @@ -73,7 +73,7 @@ jobs: sudo apt update sudo apt install -y -V libarrow-dev - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Setup Redis Cluster run: | docker pull vishnunair/docker-redis-cluster:latest @@ -85,5 +85,3 @@ jobs: pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "File and not Snowflake and not BigQuery and not minio_registry" pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "dynamo and not Snowflake and not BigQuery and not minio_registry" pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "Redshift and not Snowflake and not BigQuery and not minio_registry" - - diff --git a/.github/fork_workflows/fork_pr_integration_tests_gcp.yml b/.github/fork_workflows/fork_pr_integration_tests_gcp.yml index be9844a7e93..a6221d3b7ac 100644 --- a/.github/fork_workflows/fork_pr_integration_tests_gcp.yml +++ b/.github/fork_workflows/fork_pr_integration_tests_gcp.yml @@ -75,7 +75,7 @@ jobs: sudo apt update sudo apt install -y -V libarrow-dev - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Setup Redis 
Cluster run: | docker pull vishnunair/docker-redis-cluster:latest @@ -86,4 +86,3 @@ jobs: run: | pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "BigQuery and not dynamo and not Redshift and not Snowflake and not minio_registry" pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "File and not dynamo and not Redshift and not Snowflake and not minio_registry" - diff --git a/.github/fork_workflows/fork_pr_integration_tests_snowflake.yml b/.github/fork_workflows/fork_pr_integration_tests_snowflake.yml index a136b47b9e7..9698fe12cd7 100644 --- a/.github/fork_workflows/fork_pr_integration_tests_snowflake.yml +++ b/.github/fork_workflows/fork_pr_integration_tests_snowflake.yml @@ -65,7 +65,7 @@ jobs: sudo apt update sudo apt install -y -V libarrow-dev - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Setup Redis Cluster run: | docker pull vishnunair/docker-redis-cluster:latest @@ -82,4 +82,3 @@ jobs: run: | pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "Snowflake and not dynamo and not Redshift and not Bigquery and not gcp and not minio_registry" pytest -n 8 --cov=./ --cov-report=xml --color=yes sdk/python/tests --integration --durations=5 --timeout=1200 --timeout_method=thread -k "File and not dynamo and not Redshift and not Bigquery and not gcp and not minio_registry" - diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 8e52ba12c9e..15a6571367c 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -1,58 +1,35 @@ -name: build_wheels +name: build wheels # Call this workflow from other workflows in the repository by specifying "uses: 
./.github/workflows/build_wheels.yml" # Developers who are starting a new release should use this workflow to ensure wheels will be built correctly. # Devs should check out their fork, add a tag to the last master commit on their fork, and run the release off of their fork on the added tag to ensure wheels will be built correctly. on: - workflow_dispatch: - tags: - - 'v*.*.*' + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string workflow_call: + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string jobs: - get-version: - runs-on: ubuntu-latest - outputs: - release_version: ${{ steps.get_release_version.outputs.release_version }} - version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Get release version - id: get_release_version - run: echo ::set-output name=release_version::${GITHUB_REF#refs/*/} - - name: Get release version without prefix - id: get_release_version_without_prefix - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - echo ::set-output name=version_without_prefix::${RELEASE_VERSION:1} - - name: Get highest semver - id: get_highest_semver - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - source 
infra/scripts/setup-common-functions.sh - SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then - echo ::set-output name=highest_semver_tag::$(get_tag_release -m) - fi - - name: Check output - id: check_output - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - run: | - echo $RELEASE_VERSION - echo $VERSION_WITHOUT_PREFIX - echo $HIGHEST_SEMVER_TAG - build-python-wheel: - name: Build wheels + name: Build wheels and source runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -68,54 +45,34 @@ jobs: registry-url: 'https://registry.npmjs.org' - name: Build UI run: make build-ui - - name: Build wheels - run: | - python -m pip install build - python -m build --wheel --outdir wheelhouse/ - - uses: actions/upload-artifact@v3 - with: - name: wheels - path: ./wheelhouse/*.whl - - build-source-distribution: - name: Build source distribution - runs-on: macos-13 - steps: - - uses: actions/checkout@v4 - - name: Setup Python - id: setup-python - uses: actions/setup-python@v5 + - id: get-version + uses: ./.github/actions/get-semantic-release-version with: - python-version: "3.11" - architecture: x64 - - name: Setup Node - uses: actions/setup-node@v3 - with: - node-version-file: './ui/.nvmrc' - registry-url: 'https://registry.npmjs.org' - - name: Build and install dependencies - # There's a `git restore` in here because `make install-go-ci-dependencies` is actually messing up go.mod & go.sum. 
- run: | - pip install -U pip setuptools wheel twine - make build-ui - git status - git restore go.mod go.sum - git restore sdk/python/feast/ui/yarn.lock - - name: Build + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + - name: Checkout version and install dependencies + env: + VERSION: ${{ steps.get-version.outputs.release_version }} + PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | - python3 setup.py sdist - - uses: actions/upload-artifact@v3 + git fetch --tags + git checkout ${VERSION} + python -m pip install build + - name: Build feast + run: python -m build + - uses: actions/upload-artifact@v4 with: - name: wheels + name: python-wheels path: dist/* # We add this step so the docker images can be built as part of the pre-release verification steps. build-docker-images: + name: Build Docker images runs-on: ubuntu-latest - needs: get-version + needs: [ build-python-wheel ] strategy: matrix: - component: [feature-server, feature-server-java, feature-transformation-server] + component: [ feature-server-dev, feature-server-java, feature-transformation-server, feast-operator ] env: REGISTRY: feastdev steps: @@ -124,19 +81,27 @@ jobs: uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 + - id: get-version + uses: ./.github/actions/get-semantic-release-version + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} - name: Build image + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + RELEASE_VERSION: ${{ steps.get-version.outputs.release_version }} run: | + echo "Building docker image for ${{ matrix.component }} with version $VERSION_WITHOUT_PREFIX and release version $RELEASE_VERSION" make build-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} - env: - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} 
verify-python-wheels: + name: Verify Python wheels runs-on: ${{ matrix.os }} - needs: [build-python-wheel, build-source-distribution, get-version] + needs: [ build-python-wheel ] strategy: matrix: - os: [ubuntu-latest, macos-13 ] - python-version: ["3.9", "3.10", "3.11"] + os: [ ubuntu-latest, macos-13 ] + python-version: [ "3.9", "3.10", "3.11" ] from-source: [ True, False ] env: # this script is for testing servers @@ -151,8 +116,8 @@ jobs: else echo "Succeeded!" fi - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} steps: + - uses: actions/checkout@v4 - name: Setup Python id: setup-python uses: actions/setup-python@v5 @@ -161,7 +126,7 @@ jobs: architecture: x64 - uses: actions/download-artifact@v4.1.7 with: - name: wheels + name: python-wheels path: dist - name: Install OS X dependencies if: matrix.os == 'macos-13' @@ -178,13 +143,25 @@ jobs: if: ${{ matrix.from-source }} run: pip install dist/*tar.gz # Validate that the feast version installed is not development and is the correct version of the tag we ran it off of. + - id: get-version + uses: ./.github/actions/get-semantic-release-version + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} - name: Validate Feast Version + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} run: | + feast version + if ! VERSION_OUTPUT=$(feast version); then + echo "Error: Failed to get Feast version." 
+ exit 1 + fi VERSION_REGEX='[0-9]+\.[0-9]+\.[0-9]+' OUTPUT_REGEX='^Feast SDK Version: "$VERSION_REGEX"$' - VERSION_OUTPUT=$(feast version) VERSION=$(echo $VERSION_OUTPUT | grep -oE "$VERSION_REGEX") OUTPUT=$(echo $VERSION_OUTPUT | grep -E "$REGEX") + echo "Installed Feast Version: $VERSION and using Feast Version: $VERSION_WITHOUT_PREFIX" if [ -n "$OUTPUT" ] && [ "$VERSION" = "$VERSION_WITHOUT_PREFIX" ]; then echo "Correct Feast Version Installed" else diff --git a/.github/workflows/java_master_only.yml b/.github/workflows/java_master_only.yml index 2775f500f32..0307034bdb1 100644 --- a/.github/workflows/java_master_only.yml +++ b/.github/workflows/java_master_only.yml @@ -16,7 +16,7 @@ jobs: component: [feature-server-java] env: MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar - REGISTRY: gcr.io/kf-feast + REGISTRY: quay.io/feastdev-ci steps: - uses: actions/checkout@v4 with: @@ -40,6 +40,12 @@ jobs: run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - name: Build image run: make build-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${GITHUB_SHA} + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAYIO_CI_USERNAME }} + password: ${{ secrets.QUAYIO_CI_TOKEN }} - name: Push image run: make push-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${GITHUB_SHA} - name: Push development Docker image @@ -72,13 +78,13 @@ jobs: java-version: '11' java-package: jdk architecture: x64 - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-it-maven- - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-ut-maven-${{ hashFiles('**/pom.xml') }} @@ -86,7 +92,7 @@ jobs: ${{ runner.os }}-ut-maven- - name: Test java run: make test-java-with-coverage - - uses: actions/upload-artifact@v3 + - uses: 
actions/upload-artifact@v4 with: name: java-coverage-report path: ${{ github.workspace }}/docs/coverage/java/target/site/jacoco-aggregate/ @@ -126,7 +132,7 @@ jobs: key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} - name: Install Python dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - uses: actions/cache@v4 with: path: ~/.m2/repository diff --git a/.github/workflows/java_pr.yml b/.github/workflows/java_pr.yml index caf31ab47fc..40a2a7a7ec9 100644 --- a/.github/workflows/java_pr.yml +++ b/.github/workflows/java_pr.yml @@ -53,13 +53,13 @@ jobs: java-version: '11' java-package: jdk architecture: x64 - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-it-maven- - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-ut-maven-${{ hashFiles('**/pom.xml') }} @@ -67,7 +67,7 @@ jobs: ${{ runner.os }}-ut-maven- - name: Test java run: make test-java-with-coverage - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: java-coverage-report path: ${{ github.workspace }}/docs/coverage/java/target/site/jacoco-aggregate/ @@ -84,7 +84,7 @@ jobs: component: [ feature-server-java ] env: MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar - REGISTRY: gcr.io/kf-feast + REGISTRY: quay.io/feastdev-ci steps: - uses: actions/checkout@v4 with: @@ -97,11 +97,11 @@ jobs: python-version: "3.11" architecture: x64 - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' + uses: google-github-actions/auth@v2 with: credentials_json: '${{ secrets.GCP_SA_KEY }}' - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 + uses: google-github-actions/setup-gcloud@v2 with: project_id: ${{ 
secrets.GCP_PROJECT_ID }} - run: gcloud auth configure-docker --quiet @@ -137,18 +137,18 @@ jobs: with: python-version: '3.11' architecture: 'x64' - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-it-maven- - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' + uses: google-github-actions/auth@v2 with: credentials_json: '${{ secrets.GCP_SA_KEY }}' - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 + uses: google-github-actions/setup-gcloud@v2 with: project_id: ${{ secrets.GCP_PROJECT_ID }} - name: Use gcloud CLI @@ -180,11 +180,11 @@ jobs: path: ${{ steps.uv-cache.outputs.dir }} key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Run integration tests run: make test-java-integration - name: Save report - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: failure() with: name: it-report diff --git a/.github/workflows/lint_pr.yml b/.github/workflows/lint_pr.yml index 81732258455..33fafdcd23d 100644 --- a/.github/workflows/lint_pr.yml +++ b/.github/workflows/lint_pr.yml @@ -14,7 +14,7 @@ jobs: name: Validate PR title runs-on: ubuntu-latest steps: - - uses: amannn/action-semantic-pull-request@v4 + - uses: amannn/action-semantic-pull-request@v5 with: # Must use uppercase subjectPattern: ^(?=[A-Z]).+$ diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index ded9931737a..e3d668b17c5 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,6 +19,6 @@ jobs: run: curl -LsSf https://astral.sh/uv/install.sh | sh - name: Install dependencies run: | - make install-python-ci-dependencies-uv + make install-python-dependencies-ci 
- name: Lint python run: make lint-python diff --git a/.github/workflows/master_only.yml b/.github/workflows/master_only.yml index 7166246da5f..840a8007236 100644 --- a/.github/workflows/master_only.yml +++ b/.github/workflows/master_only.yml @@ -65,7 +65,7 @@ jobs: path: ${{ steps.uv-cache.outputs.dir }} key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Setup Redis Cluster run: | docker pull vishnunair/docker-redis-cluster:latest @@ -94,40 +94,32 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - component: [ feature-server-java, feature-transformation-server ] + component: [ feature-server-dev, feature-transformation-server, feast-operator ] env: - MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar - REGISTRY: gcr.io/kf-feast + REGISTRY: quay.io/feastdev-ci steps: - uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: install: true - - name: Login to DockerHub - uses: docker/login-action@v1 + - name: Login to Quay.io + uses: docker/login-action@v3 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCP_SA_KEY }}' - - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - name: Use gcloud CLI - run: gcloud info - - run: gcloud auth configure-docker --quiet + registry: quay.io + username: ${{ secrets.QUAYIO_CI_USERNAME }} + password: ${{ secrets.QUAYIO_CI_TOKEN }} - name: Build image run: | make build-${{ matrix.component }}-docker 
REGISTRY=${REGISTRY} VERSION=${GITHUB_SHA} - name: Push image run: | - make push-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${GITHUB_SHA} - - docker tag ${REGISTRY}/${{ matrix.component }}:${GITHUB_SHA} ${REGISTRY}/${{ matrix.component }}:develop - docker push ${REGISTRY}/${{ matrix.component }}:develop \ No newline at end of file + if [[ "${{ matrix.component }}" == "feature-server-dev" ]]; then + docker tag ${REGISTRY}/feature-server:${GITHUB_SHA} ${REGISTRY}/feature-server:develop + docker push ${REGISTRY}/feature-server --all-tags + else + docker tag ${REGISTRY}/${{ matrix.component }}:${GITHUB_SHA} ${REGISTRY}/${{ matrix.component }}:develop + docker push ${REGISTRY}/${{ matrix.component }} --all-tags + fi diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml index 11c91af2d7b..886aed44751 100644 --- a/.github/workflows/nightly-ci.yml +++ b/.github/workflows/nightly-ci.yml @@ -141,7 +141,7 @@ jobs: if: matrix.os == 'macos-13' run: brew install apache-arrow - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Setup Redis Cluster run: | docker pull vishnunair/docker-redis-cluster:latest @@ -154,4 +154,4 @@ jobs: SNOWFLAKE_CI_PASSWORD: ${{ secrets.SNOWFLAKE_CI_PASSWORD }} SNOWFLAKE_CI_ROLE: ${{ secrets.SNOWFLAKE_CI_ROLE }} SNOWFLAKE_CI_WAREHOUSE: ${{ secrets.SNOWFLAKE_CI_WAREHOUSE }} - run: make test-python-integration \ No newline at end of file + run: make test-python-integration diff --git a/.github/workflows/operator-e2e-integration-tests.yml b/.github/workflows/operator-e2e-integration-tests.yml new file mode 100644 index 00000000000..c23e8095bf7 --- /dev/null +++ b/.github/workflows/operator-e2e-integration-tests.yml @@ -0,0 +1,106 @@ +# .github/workflows/operator-e2e-integration-tests.yml +name: Operator e2e tests + +on: + push: + branches: + - main + pull_request: + types: + - opened + - synchronize + - labeled + paths-ignore: + - 
'community/**' + - 'docs/**' + - 'examples/**' + +jobs: + operator-e2e-tests: + timeout-minutes: 40 + if: + ((github.event.action == 'labeled' && (github.event.label.name == 'approved' || github.event.label.name == 'lgtm' || github.event.label.name == 'ok-to-test')) || + (github.event.action != 'labeled' && (contains(github.event.pull_request.labels.*.name, 'ok-to-test') || contains(github.event.pull_request.labels.*.name, 'approved') || contains(github.event.pull_request.labels.*.name, 'lgtm')))) && + github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + + services: + kind: + # Specify the Kubernetes version + image: kindest/node:v1.30.6 + + env: + KIND_CLUSTER: "operator-e2e-cluster" + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@v1.3.1 + with: + android: true + dotnet: true + haskell: true + large-packages: false + docker-images: false + swap-storage: false + tool-cache: false + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.22.9 + + - name: Create KIND cluster + run: | + cat <> $GITHUB_OUTPUT - name: uv cache uses: actions/cache@v4 with: path: ${{ steps.uv-cache.outputs.dir }} - key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} + key: ${{ runner.os }}-${{ matrix.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', matrix.python-version)) }} - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Test local integration tests if: ${{ always() }} # this will guarantee that step won't be canceled and resources won't leak run: make test-python-integration-local + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: 1.22.9 + - name: Operator Data Source types test + run: make -C infra/feast-operator test-datasources diff --git 
a/.github/workflows/pr_remote_rbac_integration_tests.yml b/.github/workflows/pr_remote_rbac_integration_tests.yml new file mode 100644 index 00000000000..98fa5a52c58 --- /dev/null +++ b/.github/workflows/pr_remote_rbac_integration_tests.yml @@ -0,0 +1,58 @@ +name: pr-remote-rbac-integration-tests +# This runs the integration tests related to rbac functionality and remote registry and online features. + +on: + pull_request: + types: + - opened + - synchronize + - labeled + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' + +jobs: + remote-rbac-integration-tests-python: + if: + ((github.event.action == 'labeled' && (github.event.label.name == 'approved' || github.event.label.name == 'lgtm' || github.event.label.name == 'ok-to-test')) || + (github.event.action != 'labeled' && (contains(github.event.pull_request.labels.*.name, 'ok-to-test') || contains(github.event.pull_request.labels.*.name, 'approved') || contains(github.event.pull_request.labels.*.name, 'lgtm')))) && + github.event.pull_request.base.repo.full_name == 'feast-dev/feast' + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: [ "3.11" ] + os: [ ubuntu-latest ] + env: + OS: ${{ matrix.os }} + PYTHON: ${{ matrix.python-version }} + steps: + - uses: actions/checkout@v4 + with: + repository: ${{ github.event.repository.full_name }} # Uses the full repository name + ref: ${{ github.ref }} # Uses the ref from the event + token: ${{ secrets.GITHUB_TOKEN }} # Automatically provided token + submodules: recursive + - name: Setup Python + uses: actions/setup-python@v5 + id: setup-python + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + - name: Install uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + - name: Get uv cache dir + id: uv-cache + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT + - name: uv cache + uses: actions/cache@v4 + with: + path: ${{ steps.uv-cache.outputs.dir }} + key: ${{ runner.os }}-${{ 
matrix.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', matrix.python-version)) }} + - name: Install dependencies + run: make install-python-dependencies-ci + - name: Test rbac and remote feature integration tests + if: ${{ always() }} # this will guarantee that step won't be canceled and resources won't leak + run: make test-python-integration-rbac-remote diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7d5dca8e08b..3526ef4cd7b 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -4,179 +4,49 @@ on: push: tags: - 'v*.*.*' + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + workflow_call: # Allows trigger the workflow from other workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string jobs: - get-version: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - outputs: - release_version: ${{ steps.get_release_version.outputs.release_version }} - version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - steps: - - uses: actions/checkout@v4 - - name: Get release version - id: get_release_version - run: echo ::set-output name=release_version::${GITHUB_REF#refs/*/} - - name: Get release version without prefix - id: get_release_version_without_prefix - env: - RELEASE_VERSION: ${{ 
steps.get_release_version.outputs.release_version }} - run: | - echo ::set-output name=version_without_prefix::${RELEASE_VERSION:1} - - name: Get highest semver - id: get_highest_semver - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - source infra/scripts/setup-common-functions.sh - SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then - echo ::set-output name=highest_semver_tag::$(get_tag_release -m) - fi - - name: Check output - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - run: | - echo $RELEASE_VERSION - echo $VERSION_WITHOUT_PREFIX - echo $HIGHEST_SEMVER_TAG + publish-python-sdk: + uses: ./.github/workflows/publish_python_sdk.yml + secrets: inherit + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} build-publish-docker-images: - runs-on: ubuntu-latest - needs: [get-version, publish-python-sdk] - strategy: - matrix: - component: [feature-server, feature-server-java, feature-transformation-server, feast-helm-operator, feast-operator] - env: - MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar - REGISTRY: feastdev - steps: - - uses: actions/checkout@v4 - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCP_SA_KEY }}' - - name: Set up gcloud SDK - uses: 
google-github-actions/setup-gcloud@v1 - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - name: Use gcloud CLI - run: gcloud info - - run: gcloud auth configure-docker --quiet - - name: Build image - run: | - make build-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} - env: - RELEASE_VERSION: ${{ needs.get-version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ needs.get-version.outputs.highest_semver_tag }} - - name: Push versioned images - env: - RELEASE_VERSION: ${{ needs.get-version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ needs.get-version.outputs.highest_semver_tag }} - run: | - make push-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} - - echo "Only push to latest tag if tag is the highest semver version $HIGHEST_SEMVER_TAG" - if [ "${VERSION_WITHOUT_PREFIX}" = "${HIGHEST_SEMVER_TAG:1}" ] - then - docker tag feastdev/${{ matrix.component }}:${VERSION_WITHOUT_PREFIX} feastdev/${{ matrix.component }}:latest - docker push feastdev/${{ matrix.component }}:latest - fi + uses: ./.github/workflows/publish_images.yml + needs: [ publish-python-sdk ] + secrets: inherit + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} publish-helm-charts: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - needs: get-version - env: - HELM_VERSION: v3.8.0 - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - steps: - - uses: actions/checkout@v4 - - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCP_SA_KEY }}' - - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - run: gcloud auth 
configure-docker --quiet - - name: Remove previous Helm - run: sudo rm -rf $(which helm) - - name: Install Helm - run: ./infra/scripts/helm/install-helm.sh - - name: Validate Helm chart prior to publishing - run: ./infra/scripts/helm/validate-helm-chart-publish.sh - - name: Validate all version consistency - run: ./infra/scripts/helm/validate-helm-chart-versions.sh $VERSION_WITHOUT_PREFIX - - name: Publish Helm charts - run: ./infra/scripts/helm/push-helm-charts.sh $VERSION_WITHOUT_PREFIX - - build_wheels: - uses: ./.github/workflows/build_wheels.yml - - publish-python-sdk: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - needs: [build_wheels] - steps: - - uses: actions/download-artifact@v4.1.7 - with: - name: wheels - path: dist - - uses: pypa/gh-action-pypi-publish@v1.4.2 - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} - - publish-java-sdk: - if: github.repository == 'feast-dev/feast' - container: maven:3.6-jdk-11 - runs-on: ubuntu-latest - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - submodules: 'true' - - name: Set up JDK 11 - uses: actions/setup-java@v1 - with: - java-version: '11' - java-package: jdk - architecture: x64 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - architecture: 'x64' - - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-it-maven- - - name: Publish java sdk - env: - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - GPG_PUBLIC_KEY: ${{ secrets.GPG_PUBLIC_KEY }} - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - MAVEN_SETTINGS: ${{ secrets.MAVEN_SETTINGS }} - run: | - echo -n "$GPG_PUBLIC_KEY" > /root/public-key - echo -n "$GPG_PRIVATE_KEY" > /root/private-key - mkdir -p /root/.m2/ - echo -n "$MAVEN_SETTINGS" > /root/.m2/settings.xml - infra/scripts/publish-java-sdk.sh --revision ${VERSION_WITHOUT_PREFIX} 
--gpg-key-import-dir /root + uses: ./.github/workflows/publish_helm_charts.yml + needs: [ publish-python-sdk ] + secrets: inherit + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} diff --git a/.github/workflows/publish_helm_charts.yml b/.github/workflows/publish_helm_charts.yml new file mode 100644 index 00000000000..9d28e2efd2f --- /dev/null +++ b/.github/workflows/publish_helm_charts.yml @@ -0,0 +1,65 @@ +name: publish images + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + workflow_call: # Allows trigger of the workflow from another workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + publish-helm-charts: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + env: + HELM_VERSION: v3.8.0 + steps: + - uses: actions/checkout@v4 + with: + submodules: 'true' + - id: get-version + uses: ./.github/actions/get-semantic-release-version + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + - name: Authenticate to Google Cloud + uses: 'google-github-actions/auth@v1' + with: + credentials_json: '${{ secrets.GCP_SA_KEY }}' + - name: Set up gcloud SDK + uses: google-github-actions/setup-gcloud@v1 + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + - run: gcloud auth configure-docker --quiet + - name: Remove previous Helm + run: sudo rm -rf $(which helm) + - name: Install 
Helm + run: ./infra/scripts/helm/install-helm.sh + - name: Validate Helm chart prior to publishing + run: ./infra/scripts/helm/validate-helm-chart-publish.sh + - name: Validate all version consistency + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + run: ./infra/scripts/helm/validate-helm-chart-versions.sh $VERSION_WITHOUT_PREFIX + - name: Publish Helm charts + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + run: ./infra/scripts/helm/push-helm-charts.sh $VERSION_WITHOUT_PREFIX + diff --git a/.github/workflows/publish_images.yml b/.github/workflows/publish_images.yml new file mode 100644 index 00000000000..f605fb20df1 --- /dev/null +++ b/.github/workflows/publish_images.yml @@ -0,0 +1,82 @@ +name: build and publish docker images + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + workflow_call: # Allows trigger of the workflow from another workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + build-publish-docker-images: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + strategy: + matrix: + component: [ feature-server, feature-server-java, feature-transformation-server, feast-helm-operator, feast-operator ] + env: + MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar + REGISTRY: feastdev + steps: + - uses: actions/checkout@v4 + with: + submodules: 'true' + - id: get-version + uses: 
./.github/actions/get-semantic-release-version + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Authenticate to Google Cloud + uses: 'google-github-actions/auth@v1' + with: + credentials_json: '${{ secrets.GCP_SA_KEY }}' + - name: Set up gcloud SDK + uses: google-github-actions/setup-gcloud@v1 + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + - name: Use gcloud CLI + run: gcloud info + - run: gcloud auth configure-docker --quiet + - name: Build image + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + run: | + make build-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} + - name: Push versioned images + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + HIGHEST_SEMVER_TAG: ${{ steps.get-version.outputs.highest_semver_tag }} + run: | + make push-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} + + echo "Only push to latest tag if tag is the highest semver version $HIGHEST_SEMVER_TAG" + if [ "${VERSION_WITHOUT_PREFIX}" = "${HIGHEST_SEMVER_TAG:1}" ] + then + docker tag feastdev/${{ matrix.component }}:${VERSION_WITHOUT_PREFIX} feastdev/${{ matrix.component }}:latest + docker push feastdev/${{ matrix.component }}:latest + fi diff --git a/.github/workflows/publish_java_sdk.yml b/.github/workflows/publish_java_sdk.yml new file mode 100644 index 00000000000..f89c384b126 --- /dev/null +++ b/.github/workflows/publish_java_sdk.yml @@ -0,0 +1,69 @@ +name: publish java sdk + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional 
input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + workflow_call: # Allows trigger of the workflow from another workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + + +jobs: + publish-java-sdk: + if: github.repository == 'feast-dev/feast' + container: maven:3.6-jdk-11 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: 'true' + - id: get-version + uses: ./.github/actions/get-semantic-release-version + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + java-package: jdk + architecture: x64 + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + architecture: 'x64' + - uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-it-maven- + - name: Publish java sdk + env: + VERSION_WITHOUT_PREFIX: ${{ steps.get-version.outputs.version_without_prefix }} + GPG_PUBLIC_KEY: ${{ secrets.GPG_PUBLIC_KEY }} + GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} + MAVEN_SETTINGS: ${{ secrets.MAVEN_SETTINGS }} + run: | + echo -n "$GPG_PUBLIC_KEY" > /root/public-key + echo -n "$GPG_PRIVATE_KEY" > /root/private-key + mkdir -p /root/.m2/ + echo -n "$MAVEN_SETTINGS" > /root/.m2/settings.xml + infra/scripts/publish-java-sdk.sh --revision ${VERSION_WITHOUT_PREFIX} --gpg-key-import-dir /root diff --git a/.github/workflows/publish_python_sdk.yml 
b/.github/workflows/publish_python_sdk.yml new file mode 100644 index 00000000000..03d0e989b49 --- /dev/null +++ b/.github/workflows/publish_python_sdk.yml @@ -0,0 +1,49 @@ +name: publish python sdk + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + + workflow_call: # Allows trigger of the workflow from another workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + build-wheels: + uses: ./.github/workflows/build_wheels.yml + secrets: inherit + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + + publish-python-sdk: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + needs: [ build-wheels ] + steps: + - uses: actions/download-artifact@v4.1.7 + with: + name: python-wheels + path: dist + - name: Publish package to PyPI + uses: pypa/gh-action-pypi-publish@v1.4.2 + with: + user: __token__ + password: ${{ secrets.PYPI_PASSWORD }} \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8b0eccd9a92..1ae75382905 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,10 +18,27 @@ on: required: true default: true type: boolean + workflow_call: + inputs: + dry_run: + description: 'Dry Run' + required: true + default: true + type: boolean + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + publish_ui: + 
description: 'Publish to NPM?' + required: true + default: true + type: boolean jobs: - get_dry_release_versions: + if: github.repository == 'feast-dev/feast' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ github.event.inputs.token }} @@ -36,7 +53,7 @@ jobs: - name: Setup Node.js uses: actions/setup-node@v3 with: - node-version-file: './ui/.nvmrc' + node-version: "lts/*" - name: Release (Dry Run) id: get_versions run: | @@ -62,7 +79,7 @@ jobs: - name: Setup Node.js uses: actions/setup-node@v3 with: - node-version-file: './ui/.nvmrc' + node-version: "lts/*" - name: Bump file versions run: python ./infra/scripts/release/bump_file_versions.py ${CURRENT_VERSION} ${NEXT_VERSION} - name: Install yarn dependencies @@ -91,14 +108,12 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: 1.21.x + go-version: 1.22.9 - name: Build & version operator-specific release files - run: | - cd infra/feast-operator/ - make build-installer bundle + run: make -C infra/feast-operator build-installer bundle publish-web-ui-npm: - needs: [validate_version_bumps, get_dry_release_versions] + needs: [ validate_version_bumps, get_dry_release_versions ] runs-on: ubuntu-latest env: # This publish is working using an NPM automation token to bypass 2FA @@ -121,7 +136,7 @@ jobs: run: yarn build:lib - name: Publish UI package working-directory: ./ui - if: github.event.inputs.dry_run == 'false' && github.event.inputs.publish_ui == 'true' + if: github.event.inputs.dry_run == 'false' && github.event.inputs.publish_ui == 'true' run: npm publish env: # This publish is working using an NPM automation token to bypass 2FA @@ -138,25 +153,62 @@ jobs: GIT_COMMITTER_NAME: feast-ci-bot GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co steps: - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Setup Node.js - uses: actions/setup-node@v3 - with: - node-version-file: './ui/.nvmrc' - - name: Set up Homebrew - id: set-up-homebrew - uses: 
Homebrew/actions/setup-homebrew@master - - name: Setup Helm-docs - run: | - brew install norwoodj/tap/helm-docs - - name: Release (Dry Run) - if: github.event.inputs.dry_run == 'true' - run: | - npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run - - name: Release - if: github.event.inputs.dry_run == 'false' - run: | - npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release + - name: Checkout + uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version-file: './ui/.nvmrc' + - name: Set up Homebrew + id: set-up-homebrew + uses: Homebrew/actions/setup-homebrew@master + - name: Setup Helm-docs + run: | + brew install norwoodj/tap/helm-docs + - name: Install Go + uses: actions/setup-go@v2 + with: + go-version: 1.22.9 + - name: Release (Dry Run) + if: github.event.inputs.dry_run == 'true' + run: | + npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run + - name: Release + if: github.event.inputs.dry_run == 'false' + run: | + npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release + + + update_stable_branch: + name: Update Stable Branch after release + if: github.event.inputs.dry_run == 'false' + runs-on: ubuntu-latest + needs: release + env: + GITHUB_TOKEN: ${{ github.event.inputs.token }} + GIT_AUTHOR_NAME: feast-ci-bot + GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co + GIT_COMMITTER_NAME: feast-ci-bot + GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co + GITHUB_REPOSITORY: ${{ github.repository }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Git credentials + run: | + git config --global user.name "$GIT_AUTHOR_NAME" + git config --global 
user.email "$GIT_AUTHOR_EMAIL" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY} + + - name: Fetch all branches + run: git fetch --all + + - name: Reset stable branch to match release branch + run: | + git checkout -B stable origin/${GITHUB_REF#refs/heads/} + git push origin stable --force \ No newline at end of file diff --git a/.github/workflows/smoke_tests.yml b/.github/workflows/smoke_tests.yml index 782f8b3f511..a7eb1966269 100644 --- a/.github/workflows/smoke_tests.yml +++ b/.github/workflows/smoke_tests.yml @@ -1,6 +1,11 @@ name: smoke-tests -on: [pull_request] +on: + pull_request: + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' jobs: unit-test-python: runs-on: ${{ matrix.os }} @@ -26,13 +31,15 @@ jobs: - name: Get uv cache dir id: uv-cache run: | - echo "::set-output name=dir::$(uv cache dir)" + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - name: uv cache uses: actions/cache@v4 with: path: ${{ steps.uv-cache.outputs.dir }} key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} - name: Install dependencies - run: make install-python-dependencies-uv + run: | + uv pip sync --system sdk/python/requirements/py${{ matrix.python-version }}-requirements.txt + uv pip install --system --no-deps . 
- name: Test Imports - run: python -c "from feast import cli" \ No newline at end of file + run: python -c "from feast import cli" diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index af23c8d808c..3ece863de3b 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -1,6 +1,11 @@ name: unit-tests -on: [pull_request] +on: + pull_request: + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' jobs: unit-test-python: runs-on: ${{ matrix.os }} @@ -8,10 +13,15 @@ jobs: fail-fast: false matrix: python-version: [ "3.9", "3.10", "3.11"] - os: [ ubuntu-latest, macos-13 ] + os: [ ubuntu-latest, macos-13, macos-14 ] exclude: - os: macos-13 python-version: "3.9" + - os: macos-14 + python-version: "3.9" + - os: macos-14 + python-version: "3.10" + env: OS: ${{ matrix.os }} PYTHON: ${{ matrix.python-version }} @@ -28,15 +38,15 @@ jobs: curl -LsSf https://astral.sh/uv/install.sh | sh - name: Get uv cache dir id: uv-cache - run: | - echo "::set-output name=dir::$(uv cache dir)" + run: | + echo "dir=$(uv cache dir)" >> $GITHUB_OUTPUT - name: uv cache uses: actions/cache@v4 with: path: ${{ steps.uv-cache.outputs.dir }} key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-uv-${{ hashFiles(format('**/py{0}-ci-requirements.txt', env.PYTHON)) }} - name: Install dependencies - run: make install-python-ci-dependencies-uv + run: make install-python-dependencies-ci - name: Test Python run: make test-python-unit @@ -47,7 +57,7 @@ jobs: NPM_TOKEN: ${{ secrets.NPM_TOKEN }} steps: - uses: actions/checkout@v4 - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: './ui/.nvmrc' registry-url: 'https://registry.npmjs.org' diff --git a/.github/workflows/update_stable_branch.yml b/.github/workflows/update_stable_branch.yml new file mode 100644 index 00000000000..b09af9cc2eb --- /dev/null +++ b/.github/workflows/update_stable_branch.yml @@ -0,0 +1,40 @@ +name: Update Stable 
Branch + +on: + workflow_dispatch: + inputs: + token: + description: 'GitHub token to authenticate' + required: true + type: string + +jobs: + update_stable_branch: + name: Update Stable Branch after release + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ github.event.inputs.token }} + GIT_AUTHOR_NAME: feast-ci-bot + GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co + GIT_COMMITTER_NAME: feast-ci-bot + GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co + GITHUB_REPOSITORY: ${{ github.repository }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Git credentials + run: | + git config --global user.name "$GIT_AUTHOR_NAME" + git config --global user.email "$GIT_AUTHOR_EMAIL" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY} + + - name: Fetch all branches + run: git fetch --all + + - name: Reset stable branch to match release branch + run: | + git checkout -B stable origin/${GITHUB_REF#refs/heads/} + git push origin stable --force diff --git a/.gitignore b/.gitignore index d558463c657..e33fb46cb07 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ scratch* ### Local Environment ### *local*.env +tools ### Secret ### **/service_account.json @@ -101,6 +102,7 @@ htmlcov/ .cache nosetests.xml coverage.xml +coverage.out *.cover .hypothesis/ .pytest_cache/ @@ -222,4 +224,7 @@ ui/.vercel **/yarn-error.log* # Go subprocess binaries (built during feast pip package building) -sdk/python/feast/binaries/ \ No newline at end of file +sdk/python/feast/binaries/ + +# ignore the bin directory under feast operator. 
+infra/feast-operator/bin \ No newline at end of file diff --git a/.gitpod.yml b/.gitpod.yml index 480baefede4..6e0c28da94d 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -8,7 +8,7 @@ tasks: uv pip install pre-commit pre-commit install --hook-type pre-commit --hook-type pre-push source .venv/bin/activate - export PYTHON=3.10 && make install-python-ci-dependencies-uv-venv + export PYTHON=3.10 && make install-python-dependencies-dev # git config --global alias.ci 'commit -s' # git config --global alias.sw switch # git config --global alias.st status diff --git a/.releaserc.js b/.releaserc.js index f2be2440057..61c6813442d 100644 --- a/.releaserc.js +++ b/.releaserc.js @@ -41,7 +41,7 @@ module.exports = { "verifyReleaseCmd": "./infra/scripts/validate-release.sh ${nextRelease.type} " + current_branch, // Bump all version files and build UI / update yarn.lock / helm charts - "prepareCmd": "python ./infra/scripts/release/bump_file_versions.py ${lastRelease.version} ${nextRelease.version}; make build-ui; make build-helm-docs" + "prepareCmd": "python ./infra/scripts/release/bump_file_versions.py ${lastRelease.version} ${nextRelease.version}; make build-ui; make build-helm-docs; make -C infra/feast-operator build-installer bundle; rm -rf infra/feast-operator/bin" }], ["@semantic-release/release-notes-generator", { diff --git a/CHANGELOG.md b/CHANGELOG.md index 8368cf67185..053edc3df50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,178 @@ # Changelog +# [0.46.0](https://github.com/feast-dev/feast/compare/v0.45.0...v0.46.0) (2025-02-17) + + +### Bug Fixes + +* Add scylladb to online stores list in docs ([#5061](https://github.com/feast-dev/feast/issues/5061)) ([08183ed](https://github.com/feast-dev/feast/commit/08183ed38581eb655e2f6055c50b9223fcf9662e)) +* Changed feast operator to set status of featurestore cr to ready based on deployment.status = available ([#5020](https://github.com/feast-dev/feast/issues/5020)) 
([fce0d35](https://github.com/feast-dev/feast/commit/fce0d35bc00553269fff6abb7a16897577a2421f)) +* Ensure Postgres queries are committed or autocommit is used ([#5039](https://github.com/feast-dev/feast/issues/5039)) ([46f8d7a](https://github.com/feast-dev/feast/commit/46f8d7aa87cfaf36d17c162c4f41cd983a2938d5)) +* Fixing the release workflow to refresh the stable branch when the release is not running in the dry run mode. ([#5057](https://github.com/feast-dev/feast/issues/5057)) ([a13fa9b](https://github.com/feast-dev/feast/commit/a13fa9bd18be94b349954e5db66fd30ba4db1d1e)) +* Operator - make onlineStore the default service ([#5044](https://github.com/feast-dev/feast/issues/5044)) ([6c92447](https://github.com/feast-dev/feast/commit/6c92447d1507bff02451f77f134df0a24cbd8036)) +* Operator - resolve infinite reconciler loop in authz controller ([#5056](https://github.com/feast-dev/feast/issues/5056)) ([11e4548](https://github.com/feast-dev/feast/commit/11e45482b0cace1f3c3a0ddc567a8a1172d6792a)) +* Resolve module on windows ([#4827](https://github.com/feast-dev/feast/issues/4827)) ([efbffa4](https://github.com/feast-dev/feast/commit/efbffa4be0f38166ff35f133a9b69bcbd243debd)) +* Setting the github_token explicitly to see if that solves the problem. 
([#5012](https://github.com/feast-dev/feast/issues/5012)) ([3834ffa](https://github.com/feast-dev/feast/commit/3834ffa31f52b9a68b27a9f898538827ee8e5c39)) +* Validate entities when running get_online_features ([#5031](https://github.com/feast-dev/feast/issues/5031)) ([3bb0dca](https://github.com/feast-dev/feast/commit/3bb0dca1692fb7087e967a9fc33a4b08720b13d2)) + + +### Features + +* Add SQLite retrieve_online_documents_v2 ([#5032](https://github.com/feast-dev/feast/issues/5032)) ([0fffe21](https://github.com/feast-dev/feast/commit/0fffe211be9db18d318634f47bc9401fd6e218a0)) +* Adding Click command to display configuration details ([#5036](https://github.com/feast-dev/feast/issues/5036)) ([ae68e4d](https://github.com/feast-dev/feast/commit/ae68e4de0c184dc2990ea7e8d08d2d7f1613b06f)) +* Adding volumes and volumeMounts support to Feature Store CR. ([#4983](https://github.com/feast-dev/feast/issues/4983)) ([ec6f1b7](https://github.com/feast-dev/feast/commit/ec6f1b750ed49ef36c5e3aa9f8db1d030bf80047)) +* Moving the job to seperate action so that we can test it easily. ([#5013](https://github.com/feast-dev/feast/issues/5013)) ([b9325b7](https://github.com/feast-dev/feast/commit/b9325b7f42b8866fa43b1c7567e3288dd589020f)) +* Operator - make server container creation explicit in the CR ([#5024](https://github.com/feast-dev/feast/issues/5024)) ([b16fb40](https://github.com/feast-dev/feast/commit/b16fb400fd63fdc0168cb1f845638fc003724fd4)) + +# [0.45.0](https://github.com/feast-dev/feast/compare/v0.44.0...v0.45.0) (2025-02-04) + + +### Features + +* Changing refresh stable branch from step to a job. Using github credentials bot so that we can push the changes. 
([#5011](https://github.com/feast-dev/feast/issues/5011)) ([7335e26](https://github.com/feast-dev/feast/commit/7335e266455561ebcb5ce8e318a79661e509a1c2)) + +# [0.44.0](https://github.com/feast-dev/feast/compare/v0.43.0...v0.44.0) (2025-02-04) + + +### Bug Fixes + +* Adding periodic check to fix the sporadic failures of the operator e2e tests. ([#4952](https://github.com/feast-dev/feast/issues/4952)) ([1d086be](https://github.com/feast-dev/feast/commit/1d086beb9f9726f68ababace87c58c2cc6412ca3)) +* Adding the feast-operator/bin to the .gitignore directory. Somehow it… ([#5005](https://github.com/feast-dev/feast/issues/5005)) ([1a027ee](https://github.com/feast-dev/feast/commit/1a027eec3dc38ce8a949aca842c91742b0f68b47)) +* Changed Env Vars for e2e tests ([#4975](https://github.com/feast-dev/feast/issues/4975)) ([fa0084f](https://github.com/feast-dev/feast/commit/fa0084f2ed0e9d41ff813538ee63dd4ee7371e6c)) +* Fix GitHub Actions to pass authentication ([#4963](https://github.com/feast-dev/feast/issues/4963)) ([22b9138](https://github.com/feast-dev/feast/commit/22b9138a3c0040f5779f7218522f2d96e750fbbf)), closes [#4937](https://github.com/feast-dev/feast/issues/4937) [#4939](https://github.com/feast-dev/feast/issues/4939) [#4941](https://github.com/feast-dev/feast/issues/4941) [#4940](https://github.com/feast-dev/feast/issues/4940) [#4943](https://github.com/feast-dev/feast/issues/4943) [#4944](https://github.com/feast-dev/feast/issues/4944) [#4945](https://github.com/feast-dev/feast/issues/4945) [#4946](https://github.com/feast-dev/feast/issues/4946) [#4947](https://github.com/feast-dev/feast/issues/4947) [#4948](https://github.com/feast-dev/feast/issues/4948) [#4951](https://github.com/feast-dev/feast/issues/4951) [#4954](https://github.com/feast-dev/feast/issues/4954) [#4957](https://github.com/feast-dev/feast/issues/4957) [#4958](https://github.com/feast-dev/feast/issues/4958) [#4959](https://github.com/feast-dev/feast/issues/4959) 
[#4960](https://github.com/feast-dev/feast/issues/4960) [#4962](https://github.com/feast-dev/feast/issues/4962) +* Fix showing selected navigation item in UI sidebar ([#4969](https://github.com/feast-dev/feast/issues/4969)) ([8ac6a85](https://github.com/feast-dev/feast/commit/8ac6a8547361708fec00a11a33c48ca3ae25f311)) +* Invalid column names in get_historical_features when there are field mappings on join keys ([#4886](https://github.com/feast-dev/feast/issues/4886)) ([c9aca2d](https://github.com/feast-dev/feast/commit/c9aca2d42254d1c4dfcc778b0d90303329901bd0)) +* Read project data from the 'projects' key while loading the registry state in the Feast UI ([#4772](https://github.com/feast-dev/feast/issues/4772)) ([cb81939](https://github.com/feast-dev/feast/commit/cb8193945932b98d5b8f750ac07d58c034870565)) +* Remove grpcurl dependency from Operator ([#4972](https://github.com/feast-dev/feast/issues/4972)) ([439e0b9](https://github.com/feast-dev/feast/commit/439e0b98819ef222b35617dfd6c97f04ca049f2f)) +* Removed the dry-run flag to test and we will add it back later. 
([#5007](https://github.com/feast-dev/feast/issues/5007)) ([d112b52](https://github.com/feast-dev/feast/commit/d112b529d618f19a5602039b6d347915d7e75b88)) +* Render UI navigation items as links instead of buttons ([#4970](https://github.com/feast-dev/feast/issues/4970)) ([1267703](https://github.com/feast-dev/feast/commit/1267703d099491393ca212c38f1a63a36fe6c443)) +* Resolve Operator CRD bloat due to long field descriptions ([#4985](https://github.com/feast-dev/feast/issues/4985)) ([7593bb3](https://github.com/feast-dev/feast/commit/7593bb3ec8871dbb83403461e0b6f6863d64abc6)) +* Update manifest to add feature server image for odh ([#4973](https://github.com/feast-dev/feast/issues/4973)) ([6a1c102](https://github.com/feast-dev/feast/commit/6a1c1029b5462aaa42c82fdad421176ad1692f81)) +* Updating release workflows to refer to yml instead of yaml ([#4935](https://github.com/feast-dev/feast/issues/4935)) ([02b0a68](https://github.com/feast-dev/feast/commit/02b0a68a435ab01f26b20824f3f8a4dd4e21da8d)) +* Use locally built feast-ui package in dev feature-server image ([#4998](https://github.com/feast-dev/feast/issues/4998)) ([0145e55](https://github.com/feast-dev/feast/commit/0145e5501e2c7854628d204cb515270fac3bee7d)) + + +### Features + +* Added OWNERS file for OpenshiftCI ([#4991](https://github.com/feast-dev/feast/issues/4991)) ([86a2ee8](https://github.com/feast-dev/feast/commit/86a2ee8e3ce1cd4432749928fda7a4386dc7ce0f)) +* Adding Milvus demo to examples ([#4910](https://github.com/feast-dev/feast/issues/4910)) ([2daf852](https://github.com/feast-dev/feast/commit/2daf8527c4539a007d639ac6e3061767a9c45110)) +* Adding retrieve_online_documents endpoint ([#5002](https://github.com/feast-dev/feast/issues/5002)) ([6607d3d](https://github.com/feast-dev/feast/commit/6607d3dfa1041638d3896b25cb98677412889724)) +* Adding support to return additional features from vector retrieval for Milvus db ([#4971](https://github.com/feast-dev/feast/issues/4971)) 
([6ce08d3](https://github.com/feast-dev/feast/commit/6ce08d31863b12a7a92bf5207172a05f8da077d1)) +* Creating/updating the stable branch after the release. ([#5003](https://github.com/feast-dev/feast/issues/5003)) ([e9b53cc](https://github.com/feast-dev/feast/commit/e9b53cc83ee51b906423ec2e1fac36e159d55db2)) +* Implementing online_read for MilvusOnlineStore ([#4996](https://github.com/feast-dev/feast/issues/4996)) ([92dde13](https://github.com/feast-dev/feast/commit/92dde1311c419dc3d8cbb534ed2e706fdeae1e26)) +* Improve exception message for unsupported Snowflake data types ([#4779](https://github.com/feast-dev/feast/issues/4779)) ([5992364](https://github.com/feast-dev/feast/commit/59923645e4f6a64a49bcecb7da503528af850d0f)) +* Operator add feast ui deployment ([#4930](https://github.com/feast-dev/feast/issues/4930)) ([b026d0c](https://github.com/feast-dev/feast/commit/b026d0ce30d7ce9b621679fbb33f2a9c0edaad84)) +* Updating documents to highlight v2 api for Vector Similarity Se… ([#5000](https://github.com/feast-dev/feast/issues/5000)) ([32b82a4](https://github.com/feast-dev/feast/commit/32b82a4b59bceaf9eb6662f35e77d0cae0d36550)) + +# [0.43.0](https://github.com/feast-dev/feast/compare/v0.42.0...v0.43.0) (2025-01-20) + + +### Bug Fixes + +* Add k8s module to feature-server image ([#4839](https://github.com/feast-dev/feast/issues/4839)) ([f565565](https://github.com/feast-dev/feast/commit/f565565e0132ea5170221dc6af2e93a5dc3e750d)) +* Adding input to workflow ([e3e8c97](https://github.com/feast-dev/feast/commit/e3e8c975b4b9891913d0be8d50df909d4d243191)) +* Change image push to use --all-tags option ([#4926](https://github.com/feast-dev/feast/issues/4926)) ([02458fd](https://github.com/feast-dev/feast/commit/02458fd7aad49d5daa5b9836f5abdc4dd81d07bb)) +* Fix integration build/push for images ([#4923](https://github.com/feast-dev/feast/issues/4923)) ([695e49b](https://github.com/feast-dev/feast/commit/695e49bd93a4c8af2ce5839586295b5e74e1b98e)) +* Fix integration operator 
push ([#4924](https://github.com/feast-dev/feast/issues/4924)) ([13c7267](https://github.com/feast-dev/feast/commit/13c7267b555cca4f3361f34fb384a6fd9f27dedf)) +* Fix release.yml ([#4845](https://github.com/feast-dev/feast/issues/4845)) ([b4768a8](https://github.com/feast-dev/feast/commit/b4768a81b94352de037dc305df309fcf06fd2973)) +* Fixing some of the warnings with the github actions ([#4763](https://github.com/feast-dev/feast/issues/4763)) ([1119439](https://github.com/feast-dev/feast/commit/1119439c49bc90e62f02da078901509c1d740236)) +* Improve status.applied updates & add offline pvc unit test ([#4871](https://github.com/feast-dev/feast/issues/4871)) ([3f49517](https://github.com/feast-dev/feast/commit/3f49517dfeabea5ffbd3f6b589cc0f2280ee4018)) +* Made fixes to Go Operator DB persistence ([#4830](https://github.com/feast-dev/feast/issues/4830)) ([cdc0753](https://github.com/feast-dev/feast/commit/cdc075360242bfdf3812d394a3c9c550f81b0f98)) +* Make transformation_service_endpoint configuration optional ([#4880](https://github.com/feast-dev/feast/issues/4880)) ([c62377b](https://github.com/feast-dev/feast/commit/c62377bc095a83022d13e5a8a3a9413d7e0f3e2c)) +* Move pre-release image builds to quay.io, retire gcr.io pushes ([#4922](https://github.com/feast-dev/feast/issues/4922)) ([40b975b](https://github.com/feast-dev/feast/commit/40b975b8468de2678af8b191e93495e51af0b6aa)) +* Performance regression in /get-online-features ([#4892](https://github.com/feast-dev/feast/issues/4892)) ([0db56a2](https://github.com/feast-dev/feast/commit/0db56a2cb5888bc21dbdb331e2b5fc3d33508424)) +* Refactor Operator to deploy all feast services to the same Deployment/Pod ([#4863](https://github.com/feast-dev/feast/issues/4863)) ([88854dd](https://github.com/feast-dev/feast/commit/88854dd56fd0becf4a5d5293735a1c9ba394d53d)) +* Remove unnecessary google cloud steps & upgrade docker action versions ([#4925](https://github.com/feast-dev/feast/issues/4925)) 
([32aaf9a](https://github.com/feast-dev/feast/commit/32aaf9aba96c53e1c69577312982472182e99659)) +* Remove verifyClient TLS offlineStore option from the Operator ([#4847](https://github.com/feast-dev/feast/issues/4847)) ([79fa247](https://github.com/feast-dev/feast/commit/79fa247026dd95e75a19308d437997310d061b35)) +* Resolving syntax error while querying a feature view with column name starting with a number and BigQuery as data source ([#4908](https://github.com/feast-dev/feast/issues/4908)) ([d3495a0](https://github.com/feast-dev/feast/commit/d3495a09083b1e6a746fff8444f0bbb887d6ac8b)) +* Updated python-helm-demo example to use MinIO instead of GS ([#4691](https://github.com/feast-dev/feast/issues/4691)) ([31afd99](https://github.com/feast-dev/feast/commit/31afd99c0969002fe04982e40cf7a857960f7abf)) + + +### Features + +* Add date field support to spark ([#4913](https://github.com/feast-dev/feast/issues/4913)) ([a8aeb79](https://github.com/feast-dev/feast/commit/a8aeb79830f12358c2355be44fca68e61992cb46)) +* Add date support when converting from python to feast types ([#4918](https://github.com/feast-dev/feast/issues/4918)) ([bd9f071](https://github.com/feast-dev/feast/commit/bd9f071017756e205fbabe6af0d38dfaa9be3d7b)) +* Add duckdb extra to multicloud release image ([#4862](https://github.com/feast-dev/feast/issues/4862)) ([b539eba](https://github.com/feast-dev/feast/commit/b539ebaad5ec2c1a199fe08ceccd206754ce82f0)) +* Add milvus package to release image & option to Operator ([#4870](https://github.com/feast-dev/feast/issues/4870)) ([ef724b6](https://github.com/feast-dev/feast/commit/ef724b66bd4d5f355b055d6d81525c4a17ce94c1)) +* Add Milvus Vector Database Implementation ([#4751](https://github.com/feast-dev/feast/issues/4751)) ([22c7b58](https://github.com/feast-dev/feast/commit/22c7b58f9590a357eaa57c77d5ed351f1fa07501)) +* Add online/offline replica support ([#4812](https://github.com/feast-dev/feast/issues/4812)) 
([b97da6c](https://github.com/feast-dev/feast/commit/b97da6ca3a08e3f0fc35552dd7f0bd3b59083f35)) +* Added pvc accessModes support ([#4851](https://github.com/feast-dev/feast/issues/4851)) ([a73514c](https://github.com/feast-dev/feast/commit/a73514cd4f7fecbc89679566e0f8a0af16b6b06d)) +* Adding EnvFrom support for the OptionalConfigs type to the Go Operator ([#4909](https://github.com/feast-dev/feast/issues/4909)) ([e01e510](https://github.com/feast-dev/feast/commit/e01e51076f5d8fe5be459037bd254e6f94e0cb0f)) +* Adding Feature Server to components docs ([#4868](https://github.com/feast-dev/feast/issues/4868)) ([f95e54b](https://github.com/feast-dev/feast/commit/f95e54bdbee80be6b0e290a02e56f92daac2cf64)) +* Adding features field to retrieve_online_features to return mor… ([#4869](https://github.com/feast-dev/feast/issues/4869)) ([7df287e](https://github.com/feast-dev/feast/commit/7df287e8c0f5ec3ab3fa88fd5576f636053a3769)) +* Adding packages for Milvus Online Store ([#4854](https://github.com/feast-dev/feast/issues/4854)) ([49171bd](https://github.com/feast-dev/feast/commit/49171bd53fb8bfc325eb7167cac8cae18a28bd63)) +* Adding vector_search parameter to fields ([#4855](https://github.com/feast-dev/feast/issues/4855)) ([739eaa7](https://github.com/feast-dev/feast/commit/739eaa78e6d995ee0750292d2f8d81886a3f9829)) +* Feast Operator support log level configuration for services ([#4808](https://github.com/feast-dev/feast/issues/4808)) ([19424bc](https://github.com/feast-dev/feast/commit/19424bcc975d90d922791b5bd0da6ac13955c0c5)) +* Go Operator - Parsing the output to go structs ([#4832](https://github.com/feast-dev/feast/issues/4832)) ([732865f](https://github.com/feast-dev/feast/commit/732865f20e7fae7a46f54be7bc469ce2b3bc44e2)) +* Implement `date_partition_column` for `SparkSource` ([#4844](https://github.com/feast-dev/feast/issues/4844)) ([c5ffa03](https://github.com/feast-dev/feast/commit/c5ffa037cb030c64d6e25995199cf762cc0e9b2a)) +* Loading the CA trusted store certificate 
into Feast to verify the public certificate. ([#4852](https://github.com/feast-dev/feast/issues/4852)) ([132ce2a](https://github.com/feast-dev/feast/commit/132ce2a6c9e3ff8544680d5237e9e1523d988d7e)) +* Operator E2E test to validate FeatureStore custom resource using remote registry ([#4822](https://github.com/feast-dev/feast/issues/4822)) ([d558ef7](https://github.com/feast-dev/feast/commit/d558ef7e19aa561c37c38d4d0da2b8c1467414f5)) +* Operator improvements ([#4928](https://github.com/feast-dev/feast/issues/4928)) ([7a1f4dd](https://github.com/feast-dev/feast/commit/7a1f4dd8b96a40d055467e1e5f72c91167e40484)) +* Removing the tls_verify_client flag from feast cli for offline server. ([#4842](https://github.com/feast-dev/feast/issues/4842)) ([8320e23](https://github.com/feast-dev/feast/commit/8320e23eb85cc419ef8aa0fdc07efa81857e0345)) +* Separating the RBAC and Remote related integration tests. ([#4905](https://github.com/feast-dev/feast/issues/4905)) ([76e1e21](https://github.com/feast-dev/feast/commit/76e1e2178c285886136e8f2fc4436302e4291715)) +* Snyk vulnerability issues fix. 
([#4867](https://github.com/feast-dev/feast/issues/4867)) ([dbc9207](https://github.com/feast-dev/feast/commit/dbc92070c8ef6b9e4e53d89ec03090bf30bd0f60)), closes [#6](https://github.com/feast-dev/feast/issues/6) [#3](https://github.com/feast-dev/feast/issues/3) [#4](https://github.com/feast-dev/feast/issues/4) +* Use ASOF JOIN in Snowflake offline store query ([#4850](https://github.com/feast-dev/feast/issues/4850)) ([8f591a2](https://github.com/feast-dev/feast/commit/8f591a235ba5bd9d1bc598195f46c7e12e437a2c)) + + +### Reverts + +* Revert "chore: Add Milvus to pr_integration_tests.yml" ([#4900](https://github.com/feast-dev/feast/issues/4900)) ([07958f7](https://github.com/feast-dev/feast/commit/07958f71cd89984325ec3ca2006b17fe5d333d02)), closes [#4891](https://github.com/feast-dev/feast/issues/4891) + +# [0.42.0](https://github.com/feast-dev/feast/compare/v0.41.0...v0.42.0) (2024-12-05) + + +### Bug Fixes + +* Add adapters for sqlite datetime conversion ([#4797](https://github.com/feast-dev/feast/issues/4797)) ([e198b17](https://github.com/feast-dev/feast/commit/e198b173be6355c1f169aeaae2b503f2273f23f1)) +* Added grpcio extras to default feature-server image ([#4737](https://github.com/feast-dev/feast/issues/4737)) ([e9cd373](https://github.com/feast-dev/feast/commit/e9cd3733f041da99bb1e84843ffe5af697085c34)) +* Changing node version in release ([7089918](https://github.com/feast-dev/feast/commit/7089918509404b3d217e7a2a0161293a8d6cb8aa)) +* Feast create empty online table when FeatureView attribute online=False ([#4666](https://github.com/feast-dev/feast/issues/4666)) ([237c453](https://github.com/feast-dev/feast/commit/237c453c2da7d549b9bdb2c044ba284fbb9d9ba7)) +* Fix db store types in Operator CRD ([#4798](https://github.com/feast-dev/feast/issues/4798)) ([f09339e](https://github.com/feast-dev/feast/commit/f09339eda24785d0a57feb4cf785f297d1a02ccb)) +* Fix the config issue for postgres ([#4776](https://github.com/feast-dev/feast/issues/4776)) 
([a36f7e5](https://github.com/feast-dev/feast/commit/a36f7e50d97c85595cbaa14165901924efa61cbb)) +* Fixed example materialize-incremental and improved explanation ([#4734](https://github.com/feast-dev/feast/issues/4734)) ([ca8a7ab](https://github.com/feast-dev/feast/commit/ca8a7ab888b53fe43db6e6437e7070c83e00c10d)) +* Fixed SparkSource docstrings so it wouldn't use inherited class docstrings ([#4722](https://github.com/feast-dev/feast/issues/4722)) ([32e6aa1](https://github.com/feast-dev/feast/commit/32e6aa1e7c752551d455c5efd0974a938d756210)) +* Fixing PGVector integration tests ([#4778](https://github.com/feast-dev/feast/issues/4778)) ([88a0320](https://github.com/feast-dev/feast/commit/88a03205a4ecbd875e808f6e8f86fef4f93e6da6)) +* Incorrect type passed to assert_permissions in materialize endpoints ([#4727](https://github.com/feast-dev/feast/issues/4727)) ([b72c2da](https://github.com/feast-dev/feast/commit/b72c2daac80ac22d1d8160f155bb55a1bdbf16f7)) +* Issue of DataSource subclasses using parent abstract class docstrings ([#4730](https://github.com/feast-dev/feast/issues/4730)) ([b24acd5](https://github.com/feast-dev/feast/commit/b24acd50149cb4737d5c27aa3236881f8ad26fea)) +* Operator envVar positioning & tls.SecretRef.Name ([#4806](https://github.com/feast-dev/feast/issues/4806)) ([1115d96](https://github.com/feast-dev/feast/commit/1115d966df8ecff5553ae0c0879559f9ad735245)) +* Populates project created_time correctly according to created ti… ([#4686](https://github.com/feast-dev/feast/issues/4686)) ([a61b93c](https://github.com/feast-dev/feast/commit/a61b93c666a79ec72b48d0927b2a4e1598f6650b)) +* Reduce feast-server container image size & fix dev image build ([#4781](https://github.com/feast-dev/feast/issues/4781)) ([ccc9aea](https://github.com/feast-dev/feast/commit/ccc9aea6ee0a720c6dfddf9eaa6805e7b63fa7f1)) +* Removed version func from feature_store.py ([#4748](https://github.com/feast-dev/feast/issues/4748)) 
([f902bb9](https://github.com/feast-dev/feast/commit/f902bb9765a2efd4b1325de80e3b4f2101bb3911)) +* Support registry instantiation for read-only users ([#4719](https://github.com/feast-dev/feast/issues/4719)) ([ca3d3c8](https://github.com/feast-dev/feast/commit/ca3d3c8f474ff6bf9f716c37df236bbc41bbd0d2)) +* Syntax Error in BigQuery While Retrieving Columns that Start wit… ([#4713](https://github.com/feast-dev/feast/issues/4713)) ([60fbc62](https://github.com/feast-dev/feast/commit/60fbc62080950549f28b9411e00926be168bea56)) +* Update release version in a pertinent Operator file ([#4708](https://github.com/feast-dev/feast/issues/4708)) ([764a8a6](https://github.com/feast-dev/feast/commit/764a8a657c045e99575bb8cfdc51afd9c61fa8e2)) + + +### Features + +* Add api contract to fastapi docs ([#4721](https://github.com/feast-dev/feast/issues/4721)) ([1a165c7](https://github.com/feast-dev/feast/commit/1a165c734ad8ee3923c786d80a00e4040cb1b1c8)) +* Add Couchbase as an online store ([#4637](https://github.com/feast-dev/feast/issues/4637)) ([824859b](https://github.com/feast-dev/feast/commit/824859b813a1d756887f1006fb25914a2018d097)) +* Add Operator support for spec.feastProject & status.applied fields ([#4656](https://github.com/feast-dev/feast/issues/4656)) ([430ac53](https://github.com/feast-dev/feast/commit/430ac535a5bd8311a485e51011a9602ca441d2d3)) +* Add services functionality to Operator ([#4723](https://github.com/feast-dev/feast/issues/4723)) ([d1d80c0](https://github.com/feast-dev/feast/commit/d1d80c0d208e25b92047fe5f162c67c00c69bb43)) +* Add TLS support to the Operator ([#4796](https://github.com/feast-dev/feast/issues/4796)) ([a617a6c](https://github.com/feast-dev/feast/commit/a617a6c8d67c6baaa6f9c1cc78b7799d72de48a3)) +* Added feast Go operator db stores support ([#4771](https://github.com/feast-dev/feast/issues/4771)) ([3302363](https://github.com/feast-dev/feast/commit/3302363e2f149715e1c0fb5597d0b91a97756db2)) +* Added support for setting env vars in feast services 
in feast controller ([#4739](https://github.com/feast-dev/feast/issues/4739)) ([84b24b5](https://github.com/feast-dev/feast/commit/84b24b547e40bab4fad664bb77cd864613267aad)) +* Adding docs outlining native Python transformations on singletons ([#4741](https://github.com/feast-dev/feast/issues/4741)) ([0150278](https://github.com/feast-dev/feast/commit/01502785109dfd64e3db03c855a34d9cab1a9073)) +* Adding first feast operator e2e test. ([#4791](https://github.com/feast-dev/feast/issues/4791)) ([8339f8d](https://github.com/feast-dev/feast/commit/8339f8d55c7263becda42ab41961224091dee727)) +* Adding github action to run the operator end-to-end tests. ([#4762](https://github.com/feast-dev/feast/issues/4762)) ([d8ccb00](https://github.com/feast-dev/feast/commit/d8ccb005ab8db0e79885b43aa430b78d1fbba379)) +* Adding ssl support for registry server. ([#4718](https://github.com/feast-dev/feast/issues/4718)) ([ccf7a55](https://github.com/feast-dev/feast/commit/ccf7a55e11165f4663384c580003cb809b5e0f83)) +* Adding SSL support for the React UI server and feast UI command. ([#4736](https://github.com/feast-dev/feast/issues/4736)) ([4a89252](https://github.com/feast-dev/feast/commit/4a89252cb18715458d724e5b54c77ed0de27cf3f)) +* Adding support for native Python transformations on a single dictionary ([#4724](https://github.com/feast-dev/feast/issues/4724)) ([9bbc1c6](https://github.com/feast-dev/feast/commit/9bbc1c61c7bbc38fce5568e6427257cf4d683fb2)) +* Adding TLS support for offline server. 
([#4744](https://github.com/feast-dev/feast/issues/4744)) ([5d8d03f](https://github.com/feast-dev/feast/commit/5d8d03ff2086256aa2977e5ec2ecdc048154dc1f)) +* Building the feast image ([#4775](https://github.com/feast-dev/feast/issues/4775)) ([6635dde](https://github.com/feast-dev/feast/commit/6635dde9618d000d0567791018779fc188c893d8)) +* File persistence definition and implementation ([#4742](https://github.com/feast-dev/feast/issues/4742)) ([3bad4a1](https://github.com/feast-dev/feast/commit/3bad4a135cdd9184f1b8e3c9c52470552cf2799d)) +* Object store persistence in operator ([#4758](https://github.com/feast-dev/feast/issues/4758)) ([0ae86da](https://github.com/feast-dev/feast/commit/0ae86da3ab931832b0dfe357c0be82997d37430d)) +* OIDC authorization in Feast Operator ([#4801](https://github.com/feast-dev/feast/issues/4801)) ([eb111d6](https://github.com/feast-dev/feast/commit/eb111d673ee5cea2cfadda55d0917a591cd6c377)) +* Operator will create k8s serviceaccount for each feast service ([#4767](https://github.com/feast-dev/feast/issues/4767)) ([cde5760](https://github.com/feast-dev/feast/commit/cde5760cc94cccd4cbeed918acca09d1b106d7e5)) +* Printing more verbose logs when we start the offline server ([#4660](https://github.com/feast-dev/feast/issues/4660)) ([9d8d3d8](https://github.com/feast-dev/feast/commit/9d8d3d88a0ecccef4d610baf84f1b409276044dd)) +* PVC configuration and impl ([#4750](https://github.com/feast-dev/feast/issues/4750)) ([785a190](https://github.com/feast-dev/feast/commit/785a190b50873bca2704c835027290787fe56656)) +* Qdrant vectorstore support ([#4689](https://github.com/feast-dev/feast/issues/4689)) ([86573d2](https://github.com/feast-dev/feast/commit/86573d2778cb064fb7a930dfe08e84465084523f)) +* RBAC Authorization in Feast Operator ([#4786](https://github.com/feast-dev/feast/issues/4786)) ([0ef5acc](https://github.com/feast-dev/feast/commit/0ef5acccc09a4a4a379a84cdacb0f5b7d9e8df70)) +* Support for nested timestamp fields in Spark Offline store 
([#4740](https://github.com/feast-dev/feast/issues/4740)) ([d4d94f8](https://github.com/feast-dev/feast/commit/d4d94f8ed76f72625305ad6e80337670664ba9b0)) +* Update the go feature server from Expedia code repo. ([#4665](https://github.com/feast-dev/feast/issues/4665)) ([6406625](https://github.com/feast-dev/feast/commit/6406625ff8895fa65b11d587246f7d1f5feaecba)) +* Updated feast Go operator db stores ([#4809](https://github.com/feast-dev/feast/issues/4809)) ([2c5a6b5](https://github.com/feast-dev/feast/commit/2c5a6b554cf6170b2590f32124cd7b84121cb864)) +* Updated sample secret following review ([#4811](https://github.com/feast-dev/feast/issues/4811)) ([dc9f825](https://github.com/feast-dev/feast/commit/dc9f8259ee6a2043a6fce88ea0d0a5e59494ef76)) + # [0.41.0](https://github.com/feast-dev/feast/compare/v0.40.0...v0.41.0) (2024-10-26) diff --git a/MANIFEST.in b/MANIFEST.in index 96f7c38c8a5..c43708cdc6f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,3 +6,5 @@ prune infra prune examples graft sdk/python/feast/ui/build +graft sdk/python/feast/embedded_go/lib +recursive-include sdk/python/feast/static * diff --git a/Makefile b/Makefile index 4f0f8876154..c33685ef2cb 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,13 @@ # limitations under the License. 
# -ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) + +# install tools in project (tool) dir to not pollute the system +TOOL_DIR := $(ROOT_DIR)/tools +export GOBIN=$(TOOL_DIR)/bin +export PATH := $(TOOL_DIR)/bin:$(PATH) + MVN := mvn -f java/pom.xml ${MAVEN_EXTRA_OPTS} OS := linux ifeq ($(shell uname -s), Darwin) @@ -23,7 +29,16 @@ endif TRINO_VERSION ?= 376 PYTHON_VERSION = ${shell python --version | grep -Eo '[0-9]\.[0-9]+'} +PYTHON_VERSIONS := 3.9 3.10 3.11 + +define get_env_name +$(subst .,,py$(1)) +endef + + # General +$(TOOL_DIR): + mkdir -p $@/bin format: format-python format-java @@ -35,50 +50,50 @@ protos: compile-protos-python compile-protos-docs build: protos build-java build-docker -# Python SDK +# Python SDK - local +# formerly install-python-ci-dependencies-uv-venv +# editable install +install-python-dependencies-dev: + uv pip sync sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt + uv pip install --no-deps -e . -install-python-dependencies-uv: - uv pip sync --system sdk/python/requirements/py$(PYTHON_VERSION)-requirements.txt - uv pip install --system --no-deps . +# Python SDK - system +# the --system flag installs dependencies in the global python context +# instead of a venv which is useful when working in a docker container or ci. -install-python-dependencies-uv-venv: - uv pip sync sdk/python/requirements/py$(PYTHON_VERSION)-requirements.txt - uv pip install --no-deps . +# Used in github actions/ci +# formerly install-python-ci-dependencies-uv +install-python-dependencies-ci: + uv pip sync --system sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt + uv pip install --system --no-deps -e . +# Used by multicloud/Dockerfile.dev install-python-ci-dependencies: python -m piptools sync sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt pip install --no-deps -e . 
-install-python-ci-dependencies-uv: - uv pip sync --system sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt - uv pip install --system --no-deps -e . - -install-python-ci-dependencies-uv-venv: - uv pip sync sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt - uv pip install --no-deps -e . - -lock-python-ci-dependencies: - uv pip compile --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py$(PYTHON_VERSION)-ci-requirements.txt - -compile-protos-python: - python infra/scripts/generate_protos.py - +# Currently used in test-end-to-end.sh install-python: python -m piptools sync sdk/python/requirements/py$(PYTHON_VERSION)-requirements.txt python setup.py develop -lock-python-dependencies: - uv pip compile --system --no-strip-extras setup.py --output-file sdk/python/requirements/py$(PYTHON_VERSION)-requirements.txt - lock-python-dependencies-all: - # Remove all existing requirements because we noticed the lock file is not always updated correctly. Removing and running the command again ensures that the lock file is always up to date. 
- rm -r sdk/python/requirements/* - pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.9 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.9-requirements.txt" - pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.9 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.9-ci-requirements.txt" - pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.10 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.10-requirements.txt" - pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.10 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.10-ci-requirements.txt" - pixi run --environment py311 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.11 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.11-requirements.txt" - pixi run --environment py311 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile -p 3.11 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.11-ci-requirements.txt" + # Remove all existing requirements because we noticed the lock file is not always updated correctly. + # Removing and running the command again ensures that the lock file is always up to date. 
+ rm -rf sdk/python/requirements/* 2>/dev/null || true + + $(foreach ver,$(PYTHON_VERSIONS),\ + pixi run --environment $(call get_env_name,$(ver)) --manifest-path infra/scripts/pixi/pixi.toml \ + "uv pip compile -p $(ver) --system --no-strip-extras setup.py \ + --output-file sdk/python/requirements/py$(ver)-requirements.txt" && \ + pixi run --environment $(call get_env_name,$(ver)) --manifest-path infra/scripts/pixi/pixi.toml \ + "uv pip compile -p $(ver) --system --no-strip-extras setup.py --extra ci \ + --output-file sdk/python/requirements/py$(ver)-ci-requirements.txt" && \ + ) true + + +compile-protos-python: + python infra/scripts/generate_protos.py benchmark-python: IS_TEST=True python -m pytest --integration --benchmark --benchmark-autosave --benchmark-save-data sdk/python/tests @@ -90,15 +105,28 @@ test-python-unit: python -m pytest -n 8 --color=yes sdk/python/tests test-python-integration: - python -m pytest -n 8 --integration --color=yes --durations=10 --timeout=1200 --timeout_method=thread --dist loadgroup \ + python -m pytest --tb=short -v -n 8 --integration --color=yes --durations=10 --timeout=1200 --timeout_method=thread --dist loadgroup \ -k "(not snowflake or not test_historical_features_main)" \ + -m "not rbac_remote_integration_test" \ + --log-cli-level=INFO -s \ sdk/python/tests test-python-integration-local: FEAST_IS_LOCAL_TEST=True \ FEAST_LOCAL_ONLINE_CONTAINER=True \ - python -m pytest -n 8 --color=yes --integration --durations=10 --timeout=1200 --timeout_method=thread --dist loadgroup \ + python -m pytest --tb=short -v -n 8 --color=yes --integration --durations=10 --timeout=1200 --timeout_method=thread --dist loadgroup \ + -k "not test_lambda_materialization and not test_snowflake_materialization" \ + -m "not rbac_remote_integration_test" \ + --log-cli-level=INFO -s \ + sdk/python/tests + +test-python-integration-rbac-remote: + FEAST_IS_LOCAL_TEST=True \ + FEAST_LOCAL_ONLINE_CONTAINER=True \ + python -m pytest --tb=short -v -n 8 --color=yes 
--integration --durations=10 --timeout=1200 --timeout_method=thread --dist loadgroup \ -k "not test_lambda_materialization and not test_snowflake_materialization" \ + -m "rbac_remote_integration_test" \ + --log-cli-level=INFO -s \ sdk/python/tests test-python-integration-container: @@ -242,7 +270,7 @@ test-python-universal-postgres-online: test-python-universal-pgvector-online: PYTHONPATH='.' \ - FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.pgvector_repo_configuration \ + FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.postgres_online_store.pgvector_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.postgres \ python -m pytest -n 8 --integration \ -k "not test_universal_cli and \ @@ -256,10 +284,13 @@ test-python-universal-postgres-online: not gcs_registry and \ not s3_registry and \ not test_universal_types and \ + not test_validation and \ + not test_spark_materialization_consistency and \ + not test_historical_features_containing_backfills and \ not test_snowflake" \ sdk/python/tests - test-python-universal-mysql-online: +test-python-universal-mysql-online: PYTHONPATH='.' 
\ FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.mysql_online_store.mysql_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.mysql \ @@ -283,7 +314,11 @@ test-python-universal-cassandra: FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.cassandra_online_store.cassandra_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.cassandra \ python -m pytest -x --integration \ - sdk/python/tests + sdk/python/tests/integration/offline_store/test_feature_logging.py \ + --ignore=sdk/python/tests/integration/offline_store/test_validation.py \ + -k "not test_snowflake and \ + not test_spark_materialization_consistency and \ + not test_universal_materialization" test-python-universal-hazelcast: PYTHONPATH='.' \ @@ -321,7 +356,7 @@ test-python-universal-cassandra-no-cloud-providers: not test_snowflake" \ sdk/python/tests - test-python-universal-elasticsearch-online: +test-python-universal-elasticsearch-online: PYTHONPATH='.' \ FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.elasticsearch_online_store.elasticsearch_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.elasticsearch \ @@ -340,6 +375,14 @@ test-python-universal-cassandra-no-cloud-providers: not test_snowflake" \ sdk/python/tests +test-python-universal-milvus-online: + PYTHONPATH='.' \ + FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.milvus_online_store.milvus_repo_configuration \ + PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.milvus \ + python -m pytest -n 8 --integration \ + -k "test_retrieve_online_milvus_documents" \ + sdk/python/tests --ignore=sdk/python/tests/integration/offline_store/test_dqm_validation.py + test-python-universal-singlestore-online: PYTHONPATH='.' 
\ FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.singlestore_repo_configuration \ @@ -351,7 +394,7 @@ test-python-universal-singlestore-online: not test_snowflake" \ sdk/python/tests - test-python-universal-qdrant-online: +test-python-universal-qdrant-online: PYTHONPATH='.' \ FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.qdrant_online_store.qdrant_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.qdrant \ @@ -359,9 +402,36 @@ test-python-universal-singlestore-online: -k "test_retrieve_online_documents" \ sdk/python/tests/integration/online_store/test_universal_online.py +# To use Couchbase as an offline store, you need to create an Couchbase Capella Columnar cluster on cloud.couchbase.com. +# Modify environment variables COUCHBASE_COLUMNAR_CONNECTION_STRING, COUCHBASE_COLUMNAR_USER, and COUCHBASE_COLUMNAR_PASSWORD +# with the details from your Couchbase Columnar Cluster. +test-python-universal-couchbase-offline: + PYTHONPATH='.' \ + FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.offline_stores.contrib.couchbase_columnar_repo_configuration \ + PYTEST_PLUGINS=feast.infra.offline_stores.contrib.couchbase_offline_store.tests \ + COUCHBASE_COLUMNAR_CONNECTION_STRING=couchbases:// \ + COUCHBASE_COLUMNAR_USER=username \ + COUCHBASE_COLUMNAR_PASSWORD=password \ + python -m pytest -n 8 --integration \ + -k "not test_historical_retrieval_with_validation and \ + not test_historical_features_persisting and \ + not test_universal_cli and \ + not test_go_feature_server and \ + not test_feature_logging and \ + not test_reorder_columns and \ + not test_logged_features_validation and \ + not test_lambda_materialization_consistency and \ + not test_offline_write and \ + not test_push_features_to_offline_store and \ + not gcs_registry and \ + not s3_registry and \ + not test_snowflake and \ + not test_universal_types" \ + sdk/python/tests + test-python-universal-couchbase-online: PYTHONPATH='.' 
\ - FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.contrib.couchbase_repo_configuration \ + FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.couchbase_online_store.couchbase_repo_configuration \ PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.couchbase \ python -m pytest -n 8 --integration \ -k "not test_universal_cli and \ @@ -428,18 +498,19 @@ kill-trino-locally: # Docker -build-docker: build-feature-server-python-aws-docker build-feature-transformation-server-docker build-feature-server-java-docker +build-docker: build-feature-server-docker build-feature-transformation-server-docker build-feature-server-java-docker build-feast-operator-docker push-ci-docker: docker push $(REGISTRY)/feast-ci:$(VERSION) push-feature-server-docker: - docker push $(REGISTRY)/feature-server:$$VERSION + docker push $(REGISTRY)/feature-server:$(VERSION) build-feature-server-docker: - docker buildx build --build-arg VERSION=$$VERSION \ - -t $(REGISTRY)/feature-server:$$VERSION \ - -f sdk/python/feast/infra/feature_servers/multicloud/Dockerfile --load . + docker buildx build \ + -t $(REGISTRY)/feature-server:$(VERSION) \ + -f sdk/python/feast/infra/feature_servers/multicloud/Dockerfile \ + --load sdk/python/feast/infra/feature_servers/multicloud push-feature-transformation-server-docker: docker push $(REGISTRY)/feature-transformation-server:$(VERSION) @@ -484,10 +555,18 @@ build-feast-operator-docker: # Dev images build-feature-server-dev: - docker buildx build --build-arg VERSION=dev \ + docker buildx build \ -t feastdev/feature-server:dev \ -f sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev --load . +build-feature-server-dev-docker: + docker buildx build \ + -t $(REGISTRY)/feature-server:$(VERSION) \ + -f sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev --load . 
+ +push-feature-server-dev-docker: + docker push $(REGISTRY)/feature-server:$(VERSION) + build-java-docker-dev: make build-java-no-tests REVISION=dev docker buildx build --build-arg VERSION=dev \ @@ -536,3 +615,64 @@ build-helm-docs: # Note: requires node and yarn to be installed build-ui: cd $(ROOT_DIR)/sdk/python/feast/ui && yarn upgrade @feast-dev/feast-ui --latest && yarn install && npm run build --omit=dev + + + +# Go SDK & embedded +PB_REL = https://github.com/protocolbuffers/protobuf/releases +PB_VERSION = 3.11.2 +PB_ARCH := $(shell uname -m) +ifeq ($(PB_ARCH), arm64) + PB_ARCH=aarch_64 +endif +PB_PROTO_FOLDERS=core registry serving types storage + +$(TOOL_DIR)/protoc-$(PB_VERSION)-$(OS)-$(PB_ARCH).zip: $(TOOL_DIR) + cd $(TOOL_DIR) && \ + curl -LO $(PB_REL)/download/v$(PB_VERSION)/protoc-$(PB_VERSION)-$(OS)-$(PB_ARCH).zip + +.PHONY: install-go-proto-dependencies +install-go-proto-dependencies: $(TOOL_DIR)/protoc-$(PB_VERSION)-$(OS)-$(PB_ARCH).zip + unzip -u $(TOOL_DIR)/protoc-$(PB_VERSION)-$(OS)-$(PB_ARCH).zip -d $(TOOL_DIR) + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.31.0 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 + +.PHONY: compile-protos-go +compile-protos-go: install-go-proto-dependencies + $(foreach folder,$(PB_PROTO_FOLDERS), \ + protoc --proto_path=$(ROOT_DIR)/protos \ + --go_out=$(ROOT_DIR)/go/protos \ + --go_opt=module=github.com/feast-dev/feast/go/protos \ + --go-grpc_out=$(ROOT_DIR)/go/protos \ + --go-grpc_opt=module=github.com/feast-dev/feast/go/protos $(ROOT_DIR)/protos/feast/$(folder)/*.proto; ) true + +#install-go-ci-dependencies: + # go install golang.org/x/tools/cmd/goimports + # python -m pip install "pybindgen==0.22.1" "grpcio-tools>=1.56.2,<2" "mypy-protobuf>=3.1" + +.PHONY: build-go +build-go: compile-protos-go + go build -o feast ./go/main.go + +.PHONY: install-feast-ci-locally +install-feast-ci-locally: + uv pip install -e ".[ci]" + +.PHONY: test-go +test-go: compile-protos-go 
install-feast-ci-locally compile-protos-python + CGO_ENABLED=1 go test -coverprofile=coverage.out ./... && go tool cover -html=coverage.out -o coverage.html + +.PHONY: format-go +format-go: + gofmt -s -w go/ + +.PHONY: lint-go +lint-go: compile-protos-go + go vet ./go/internal/feast + +.PHONY: build-go-docker-dev +build-go-docker-dev: + docker buildx build --build-arg VERSION=dev \ + -t feastdev/feature-server-go:dev \ + -f go/infra/docker/feature-server/Dockerfile --load . + diff --git a/OWNERS b/OWNERS new file mode 100644 index 00000000000..852b3fdf8c6 --- /dev/null +++ b/OWNERS @@ -0,0 +1,15 @@ +# This file is being used by RedHat for running e2e CI + +approvers: +- redhathameed +- tmihalac +- accorvin +- amsharma3 +- franciscojavierarceo +options: {} +reviewers: +- redhathameed +- tmihalac +- accorvin +- amsharma3 +- franciscojavierarceo \ No newline at end of file diff --git a/README.md b/README.md index e02fb978d7b..e820d3152d3 100644 --- a/README.md +++ b/README.md @@ -151,6 +151,9 @@ The list below contains the functionality that contributors are planning to deve * We welcome contribution to all items in the roadmap! +* **Natural Language Processing** + * [x] Vector Search (Alpha release. 
See [RFC](https://docs.google.com/document/d/18IWzLEA9i2lDWnbfbwXnMCg3StlqaLVI-uRpQjr_Vos/edit#heading=h.9gaqqtox9jg6)) + * [ ] [Enhanced Feature Server and SDK for native support for NLP](https://github.com/feast-dev/feast/issues/4964) * **Data Sources** * [x] [Snowflake source](https://docs.feast.dev/reference/data-sources/snowflake) * [x] [Redshift source](https://docs.feast.dev/reference/data-sources/redshift) @@ -160,6 +163,7 @@ The list below contains the functionality that contributors are planning to deve * [x] [Hive (community plugin)](https://github.com/baineng/feast-hive) * [x] [Postgres (contrib plugin)](https://docs.feast.dev/reference/data-sources/postgres) * [x] [Spark (contrib plugin)](https://docs.feast.dev/reference/data-sources/spark) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/data-sources/couchbase) * [x] Kafka / Kinesis sources (via [push support into the online store](https://docs.feast.dev/reference/data-sources/push)) * **Offline Stores** * [x] [Snowflake](https://docs.feast.dev/reference/offline-stores/snowflake) @@ -170,6 +174,7 @@ The list below contains the functionality that contributors are planning to deve * [x] [Postgres (contrib plugin)](https://docs.feast.dev/reference/offline-stores/postgres) * [x] [Trino (contrib plugin)](https://github.com/Shopify/feast-trino) * [x] [Spark (contrib plugin)](https://docs.feast.dev/reference/offline-stores/spark) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/offline-stores/couchbase) * [x] [In-memory / Pandas](https://docs.feast.dev/reference/offline-stores/file) * [x] [Custom offline store support](https://docs.feast.dev/how-to-guides/customizing-feast/adding-a-new-offline-store) * **Online Stores** @@ -184,12 +189,14 @@ The list below contains the functionality that contributors are planning to deve * [x] [Azure Cache for Redis (community plugin)](https://github.com/Azure/feast-azure) * [x] [Postgres (contrib 
plugin)](https://docs.feast.dev/reference/online-stores/postgres) * [x] [Cassandra / AstraDB (contrib plugin)](https://docs.feast.dev/reference/online-stores/cassandra) + * [x] [ScyllaDB (contrib plugin)](https://docs.feast.dev/reference/online-stores/scylladb) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/online-stores/couchbase) * [x] [Custom online store support](https://docs.feast.dev/how-to-guides/customizing-feast/adding-support-for-a-new-online-store) * **Feature Engineering** - * [x] On-demand Transformations (Beta release. See [RFC](https://docs.google.com/document/d/1lgfIw0Drc65LpaxbUu49RCeJgMew547meSJttnUqz7c/edit#)) + * [x] On-demand Transformations (On Read) (Beta release. See [RFC](https://docs.google.com/document/d/1lgfIw0Drc65LpaxbUu49RCeJgMew547meSJttnUqz7c/edit#)) * [x] Streaming Transformations (Alpha release. See [RFC](https://docs.google.com/document/d/1UzEyETHUaGpn0ap4G82DHluiCj7zEbrQLkJJkKSv4e8/edit)) * [ ] Batch transformation (In progress. See [RFC](https://docs.google.com/document/d/1964OkzuBljifDvkV-0fakp2uaijnVzdwWNGdz7Vz50A/edit)) - * [ ] Persistent On-demand Transformations (Beta release. See [GitHub Issue](https://github.com/feast-dev/feast/issues/4376)) + * [x] On-demand Transformations (On Write) (Beta release. See [GitHub Issue](https://github.com/feast-dev/feast/issues/4376)) * **Streaming** * [x] [Custom streaming ingestion job support](https://docs.feast.dev/how-to-guides/customizing-feast/creating-a-custom-provider) * [x] [Push based streaming data ingestion to online store](https://docs.feast.dev/reference/data-sources/push) @@ -212,8 +219,6 @@ The list below contains the functionality that contributors are planning to deve * [x] DataHub integration (see [DataHub Feast docs](https://datahubproject.io/docs/generated/ingestion/sources/feast/)) * [x] Feast Web UI (Beta release. 
See [docs](https://docs.feast.dev/reference/alpha-web-ui)) * [ ] Feast Lineage Explorer -* **Natural Language Processing** - * [x] Vector Search (Alpha release. See [RFC](https://docs.google.com/document/d/18IWzLEA9i2lDWnbfbwXnMCg3StlqaLVI-uRpQjr_Vos/edit#heading=h.9gaqqtox9jg6)) ## 🎓 Important Resources diff --git a/community/ADOPTERS.md b/community/ADOPTERS.md index 5ef285b41b8..38da2012efe 100644 --- a/community/ADOPTERS.md +++ b/community/ADOPTERS.md @@ -4,13 +4,14 @@ Below are the adopters of Feast. If you are using Feast please add yourself into the following list by a pull request. Please keep the list in alphabetical order. -| Organization | Contact | GitHub Username | -| ------------ | ------- | ------- | -| Affirm | Francisco Javier Arceo | franciscojavierarceo | -| Bank of Georgia | Tornike Gurgenidze | tokoko | -| Get Ground | Zhiling Chen | zhilingc | -| Gojek | Pradithya Aria Pura | pradithya | -| Twitter | David Liu | mavysavydav| -| SeatGeek | Rob Howley | robhowley | -| Shopify | Matt Delacour | MattDelac | -| Snowflake | Miles Adkins | sfc-gh-madkins | +| Organization | Contact | GitHub Username | +|-----------------|------------------------|----------------------| +| Affirm | Francisco Javier Arceo | franciscojavierarceo | +| Bank of Georgia | Tornike Gurgenidze | tokoko | +| Get Ground | Zhiling Chen | zhilingc | +| Gojek | Pradithya Aria Pura | pradithya | +| Picnic | Tom Steenbergen | TomSteenbergen | +| Twitter | David Liu | mavysavydav | +| SeatGeek | Rob Howley | robhowley | +| Shopify | Matt Delacour | MattDelac | +| Snowflake | Miles Adkins | sfc-gh-madkins | diff --git a/community/maintainers.md b/community/maintainers.md index 5ccd347be00..779689851d7 100644 --- a/community/maintainers.md +++ b/community/maintainers.md @@ -9,29 +9,29 @@ In alphabetical order | Name | GitHub Username | Email | Organization | | -------------- | ---------------- |-----------------------------| ------------------ | | Achal Shah | `achals` | achals@gmail.com 
| Tecton | -| Edson Tirelli | `etirelli` | ed.tirelli@gmail.com | Red Hat | | Francisco Javier Arceo | `franciscojavierarceo` | arceofrancisco@gmail.com | Affirm | | Hao Xu | `HaoXuAI` | sduxuhao@gmail.com | JPMorgan | -| Jeremy Ary | `jeremyary` | jeremy.ary@gmail.com | Red Hat | | Shuchu Han | `shuchu` | shuchu.han@gmail.com | Independent | | Willem Pienaar | `woop` | will.pienaar@gmail.com | Cleric | -| Zhiling Chen | `zhilingc` | chnzhlng@gmail.com | GetGround | -| Tornike Gurgenidze | `tokoko` | togurgenidze@gmail.com | Bank of Georgia | +| Zhiling Chen | `zhilingc` | chnzhlng@gmail.com | GetGround | +| Tornike Gurgenidze | `tokoko` | togurgenidze@gmail.com | Bank of Georgia | ## Emeritus Maintainers -| Name | GitHub Username | Email | Organization | -|---------------------|-----------------|-----------------------------|-------------------| -| Oleg Avdeev | oavdeev | oleg.v.avdeev@gmail.com | Tecton | -| Oleksii Moskalenko | pyalex | moskalenko.alexey@gmail.com | Tecton | -| Jay Parthasarthy | jparthasarthy | jparthasarthy@gmail.com | Tecton | -| Danny Chiao | adchia | danny@tecton.ai | Tecton | -| Pradithya Aria Pura | pradithya | pradithya.aria@gmail.com | Gojek | -| Tsotne Tabidze | tsotnet | tsotnet@gmail.com | Tecton | -| Abhin Chhabra | chhabrakadabra | chhabra.abhin@gmail.com | Shopify | -| Danny Chiao | adchia | danny@tecton.ai | Tecton | -| David Liu | mavysavydav | davidyliuliu@gmail.com | Twitter | -| Matt Delacour | MattDelac | mdelacour@hey.com | Shopify | -| Miles Adkins | sfc-gh-madkins | miles.adkins@snowflake.com | Snowflake | -| Felix Wang | `felixwang9817` | wangfelix98@gmail.com | Tecton | -| Kevin Zhang | `kevjumba` | kevin.zhang.13499@gmail.com | Tecton | +| Name | GitHub Username | Email | Organization | +|---------------------|----------------|---------------------------|-------------------| +| Edson Tirelli | `etirelli` | ed.tirelli@gmail.com | Red Hat | +| Jeremy Ary | `jeremyary` | jeremy.ary@gmail.com | Red Hat | +| Oleg Avdeev | 
oavdeev | oleg.v.avdeev@gmail.com | Tecton | +| Oleksii Moskalenko | pyalex | moskalenko.alexey@gmail.com | Tecton | +| Jay Parthasarthy | jparthasarthy | jparthasarthy@gmail.com | Tecton | +| Danny Chiao | adchia | danny@tecton.ai | Tecton | +| Pradithya Aria Pura | pradithya | pradithya.aria@gmail.com | Gojek | +| Tsotne Tabidze | tsotnet | tsotnet@gmail.com | Tecton | +| Abhin Chhabra | chhabrakadabra | chhabra.abhin@gmail.com | Shopify | +| Danny Chiao | adchia | danny@tecton.ai | Tecton | +| David Liu | mavysavydav | davidyliuliu@gmail.com | Twitter | +| Matt Delacour | MattDelac | mdelacour@hey.com | Shopify | +| Miles Adkins | sfc-gh-madkins | miles.adkins@snowflake.com | Snowflake | +| Felix Wang | `felixwang9817` | wangfelix98@gmail.com | Tecton | +| Kevin Zhang | `kevjumba` | kevin.zhang.13499@gmail.com | Tecton | diff --git a/docs/README.md b/docs/README.md index 5e36e1ce40a..02ecaefa10c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -11,12 +11,12 @@ for historical feature extraction used in model training and an (2) [online stor for serving features at low-latency in production systems and applications. Feast is a configurable operational data system that re-uses existing infrastructure to manage and serve machine learning -features to realtime models. For more details please review our [architecture](getting-started/architecture/overview.md). +features to realtime models. For more details, please review our [architecture](getting-started/architecture/overview.md). 
Concretely, Feast provides: -* A python SDK for programtically defining features, entities, sources, and (optionally) transformations -* A python SDK for for reading and writing features to configured offline and online data stores +* A Python SDK for programmatically defining features, entities, sources, and (optionally) transformations +* A Python SDK for reading and writing features to configured offline and online data stores * An [optional feature server](reference/feature-servers/README.md) for reading and writing features (useful for non-python languages) * A [UI](reference/alpha-web-ui.md) for viewing and exploring information about features defined in the project * A [CLI tool](reference/feast-cli-commands.md) for viewing and updating feature information @@ -24,8 +24,8 @@ Concretely, Feast provides: Feast allows ML platform teams to: * **Make features consistently available for training and low-latency serving** by managing an _offline store_ (to process historical data for scale-out batch scoring or model training), a low-latency _online store_ (to power real-time prediction)_,_ and a battle-tested _feature server_ (to serve pre-computed features online). -* **Avoid data leakage** by generating point-in-time correct feature sets so data scientists can focus on feature engineering rather than debugging error-prone dataset joining logic. This ensure that future feature values do not leak to models during training. -* **Decouple ML from data infrastructure** by providing a single data access layer that abstracts feature storage from feature retrieval, ensuring models remain portable as you move from training models to serving models, from batch models to realtime models, and from one data infra system to another. +* **Avoid data leakage** by generating point-in-time correct feature sets so data scientists can focus on feature engineering rather than debugging error-prone dataset joining logic. 
This ensures that future feature values do not leak to models during training. +* **Decouple ML from data infrastructure** by providing a single data access layer that abstracts feature storage from feature retrieval, ensuring models remain portable as you move from training models to serving models, from batch models to real-time models, and from one data infra system to another. {% hint style="info" %} **Note:** Feast today primarily addresses _timestamped structured data_. @@ -42,25 +42,28 @@ serving system must make a request to the feature store to retrieve feature valu ## Who is Feast for? -Feast helps ML platform/MLOps teams with DevOps experience productionize real-time models. Feast also helps these teams -build a feature platform that improves collaboration between data engineers, software engineers, machine learning -engineers, and data scientists. +Feast helps ML platform/MLOps teams with DevOps experience productionize real-time models. Feast also helps these teams build a feature platform that improves collaboration between data engineers, software engineers, machine learning engineers, and data scientists. -Feast is likely **not** the right tool if you -* are in an organization that’s just getting started with ML and is not yet sure what the business impact of ML is +* *For Data Scientists*: Feast is a tool where you can easily define, store, and retrieve your features for both model development and model deployment. By using Feast, you can focus on what you do best: build features that power your AI/ML models and maximize the value of your data. 
+    +* *For MLOps Engineers*: Feast is a library that allows you to connect your existing infrastructure (e.g., online database, application server, microservice, analytical database, and orchestration tooling) that enables your Data Scientists to ship features for their models to production using a friendly SDK without having to be concerned with software engineering challenges that occur from serving real-time production systems. By using Feast, you can focus on maintaining a resilient system, instead of implementing features for Data Scientists. +    +* *For Data Engineers*: Feast provides a centralized catalog for storing feature definitions, allowing one to maintain a single source of truth for feature data. It provides the abstraction for reading and writing to many different types of offline and online data stores. Using either the provided Python SDK or the feature server service, users can write data to the online and/or offline stores and then read that data out again in either low-latency online scenarios for model inference, or in batch scenarios for model training. + +* *For AI Engineers*: Feast provides a platform designed to scale your AI applications by enabling seamless integration of richer data and facilitating fine-tuning. With Feast, you can optimize the performance of your AI models while ensuring a scalable and efficient data pipeline. ## What Feast is not? ### Feast is not -* **an** [**ETL**](https://en.wikipedia.org/wiki/Extract,\_transform,\_load) / [**ELT**](https://en.wikipedia.org/wiki/Extract,\_load,\_transform) **system.** Feast is not a general purpose data pipelining system. Users often leverage tools like [dbt](https://www.getdbt.com) to manage upstream data transformations. Feast does support some [transformations](getting-started/architecture/feature-transformetion.md). -* **a data orchestration tool:** Feast does not manage or orchestrate complex workflow DAGs. 
It relies on upstream data pipelines to produce feature values and integrations with tools like [Airflow](https://airflow.apache.org) to make features consistently available. -* **a data warehouse:** Feast is not a replacement for your data warehouse or the source of truth for all transformed data in your organization. Rather, Feast is a light-weight downstream layer that can serve data from an existing data warehouse (or other data sources) to models in production. -* **a database:** Feast is not a database, but helps manage data stored in other systems (e.g. BigQuery, Snowflake, DynamoDB, Redis) to make features consistently available at training / serving time +* **An** [**ETL**](https://en.wikipedia.org/wiki/Extract,\_transform,\_load) / [**ELT**](https://en.wikipedia.org/wiki/Extract,\_load,\_transform) **system.** Feast is not a general purpose data pipelining system. Users often leverage tools like [dbt](https://www.getdbt.com) to manage upstream data transformations. Feast does support some [transformations](getting-started/architecture/feature-transformation.md). +* **A data orchestration tool:** Feast does not manage or orchestrate complex workflow DAGs. It relies on upstream data pipelines to produce feature values and integrations with tools like [Airflow](https://airflow.apache.org) to make features consistently available. +* **A data warehouse:** Feast is not a replacement for your data warehouse or the source of truth for all transformed data in your organization. Rather, Feast is a lightweight downstream layer that can serve data from an existing data warehouse (or other data sources) to models in production. +* **A database:** Feast is not a database, but helps manage data stored in other systems (e.g. 
BigQuery, Snowflake, DynamoDB, Redis) to make features consistently available at training / serving time ### Feast does not _fully_ solve * **reproducible model training / model backtesting / experiment management**: Feast captures feature and model metadata, but does not version-control datasets / labels or manage train / test splits. Other tools like [DVC](https://dvc.org/), [MLflow](https://www.mlflow.org/), and [Kubeflow](https://www.kubeflow.org/) are better suited for this. -* **batch feature engineering**: Feast supports on demand and streaming transformations. Feast is also investing in supporting batch transformations. +* **batch feature engineering**: Feast supports on-demand and streaming transformations. Feast is also investing in supporting batch transformations. * **native streaming feature integration:** Feast enables users to push streaming features, but does not pull from streaming sources or manage streaming pipelines. * **lineage:** Feast helps tie feature values to model versions, but is not a complete solution for capturing end-to-end lineage from raw data sources to model versions. Feast also has community contributed plugins with [DataHub](https://datahubproject.io/docs/generated/ingestion/sources/feast/) and [Amundsen](https://github.com/amundsen-io/amundsen/blob/4a9d60176767c4d68d1cad5b093320ea22e26a49/databuilder/databuilder/extractor/feast\_extractor.py). * **data quality / drift detection**: Feast has experimental integrations with [Great Expectations](https://greatexpectations.io/), but is not purpose built to solve data drift / data quality issues. This requires more sophisticated monitoring across data pipelines, served feature values, labels, and model versions. @@ -72,7 +75,7 @@ Many companies have used Feast to power real-world ML use cases such as: * Personalizing online recommendations by leveraging pre-computed historical user or item features. 
* Online fraud detection, using features that compare against (pre-computed) historical transaction patterns * Churn prediction (an offline model), generating feature values for all users at a fixed cadence in batch -* Credit scoring, using pre-computed historical features to compute probability of default +* Credit scoring, using pre-computed historical features to compute the probability of default ## How can I get started? diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 325b9673538..8db4143697e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,6 +1,7 @@ # Table of contents * [Introduction](README.md) +* [Blog](blog/README.md) * [Community & getting help](community.md) * [Roadmap](roadmap.md) * [Changelog](https://github.com/feast-dev/feast/blob/master/CHANGELOG.md) @@ -18,21 +19,25 @@ * [Role-Based Access Control (RBAC)](getting-started/architecture/rbac.md) * [Concepts](getting-started/concepts/README.md) * [Overview](getting-started/concepts/overview.md) + * [Project](getting-started/concepts/project.md) * [Data ingestion](getting-started/concepts/data-ingestion.md) * [Entity](getting-started/concepts/entity.md) * [Feature view](getting-started/concepts/feature-view.md) * [Feature retrieval](getting-started/concepts/feature-retrieval.md) * [Point-in-time joins](getting-started/concepts/point-in-time-joins.md) - * [Permission](getting-started/concepts/permission.md) * [\[Alpha\] Saved dataset](getting-started/concepts/dataset.md) + * [Permission](getting-started/concepts/permission.md) + * [Tags](getting-started/concepts/tags.md) * [Components](getting-started/components/README.md) * [Overview](getting-started/components/overview.md) * [Registry](getting-started/components/registry.md) * [Offline store](getting-started/components/offline-store.md) * [Online store](getting-started/components/online-store.md) + * [Feature server](getting-started/components/feature-server.md) * [Batch Materialization 
Engine](getting-started/components/batch-materialization-engine.md) * [Provider](getting-started/components/provider.md) * [Authorization Manager](getting-started/components/authz_manager.md) + * [OpenTelemetry Integration](getting-started/components/open-telemetry.md) * [Third party integrations](getting-started/third-party-integrations.md) * [FAQ](getting-started/faq.md) @@ -64,6 +69,7 @@ * [Adding a new online store](how-to-guides/customizing-feast/adding-support-for-a-new-online-store.md) * [Adding a custom provider](how-to-guides/customizing-feast/creating-a-custom-provider.md) * [Adding or reusing tests](how-to-guides/adding-or-reusing-tests.md) +* [Starting Feast servers in TLS(SSL) Mode](how-to-guides/starting-feast-servers-tls-mode.md) ## Reference @@ -82,6 +88,7 @@ * [PostgreSQL (contrib)](reference/data-sources/postgres.md) * [Trino (contrib)](reference/data-sources/trino.md) * [Azure Synapse + Azure SQL (contrib)](reference/data-sources/mssql.md) + * [Couchbase (contrib)](reference/data-sources/couchbase.md) * [Offline stores](reference/offline-stores/README.md) * [Overview](reference/offline-stores/overview.md) * [Dask](reference/offline-stores/dask.md) @@ -89,6 +96,7 @@ * [BigQuery](reference/offline-stores/bigquery.md) * [Redshift](reference/offline-stores/redshift.md) * [DuckDB](reference/offline-stores/duckdb.md) + * [Couchbase Columnar (contrib)](reference/offline-stores/couchbase.md) * [Spark (contrib)](reference/offline-stores/spark.md) * [PostgreSQL (contrib)](reference/offline-stores/postgres.md) * [Trino (contrib)](reference/offline-stores/trino.md) @@ -112,6 +120,7 @@ * [Hazelcast](reference/online-stores/hazelcast.md) * [ScyllaDB](reference/online-stores/scylladb.md) * [SingleStore](reference/online-stores/singlestore.md) + * [Milvus](reference/online-stores/milvus.md) * [Registries](reference/registries/README.md) * [Local](reference/registries/local.md) * [S3](reference/registries/s3.md) diff --git a/docs/blog/README.md 
b/docs/blog/README.md new file mode 100644 index 00000000000..cc42cfe442f --- /dev/null +++ b/docs/blog/README.md @@ -0,0 +1,21 @@ +# Blog Posts + +Welcome to the Feast blog! Here you'll find articles about feature store development, new features, and community updates. + +## Featured Posts + +{% content-ref url="what-is-a-feature-store.md" %} +[what-is-a-feature-store.md](what-is-a-feature-store.md) +{% endcontent-ref %} + +{% content-ref url="the-future-of-feast.md" %} +[the-future-of-feast.md](the-future-of-feast.md) +{% endcontent-ref %} + +{% content-ref url="feast-supports-vector-database.md" %} +[feast-supports-vector-database.md](feast-supports-vector-database.md) +{% endcontent-ref %} + +{% content-ref url="rbac-role-based-access-controls.md" %} +[rbac-role-based-access-controls.md](rbac-role-based-access-controls.md) +{% endcontent-ref %} diff --git a/docs/blog/a-state-of-feast.md b/docs/blog/a-state-of-feast.md new file mode 100644 index 00000000000..7343effdcb4 --- /dev/null +++ b/docs/blog/a-state-of-feast.md @@ -0,0 +1,80 @@ +# A State of Feast + +*January 21, 2021* | *Willem Pienaar* + +## Introduction + +Two years ago we first announced the launch of Feast, an open source feature store for machine learning. Feast is an operational data system that solves some of the key challenges that ML teams encounter while productionizing machine learning systems. + +Recognizing that ML and Feast have advanced since we launched, we take a moment today to discuss the past, present and future of Feast. We consider the more significant lessons we learned while building Feast, where we see the project heading, and why teams should consider adopting Feast as part of their operational ML stacks. + +## Background + +Feast was developed to address the challenges faced while productionizing data for machine learning. 
In our original [Google Cloud article](https://cloud.google.com/blog/products/ai-machine-learning/introducing-feast-an-open-source-feature-store-for-machine-learning), we highlighted some of these challenges, namely: + +1. Features aren't reused. +2. Feature definitions are inconsistent across teams. +3. Getting features into production is hard. +4. Feature values are inconsistent between training and serving. + +Whereas an industry to solve data transformations and data-quality problems already existed, our focus for shaping Feast was to overcome operational ML hurdles that exist between data science and ML engineering. Toward that end, our initial aim was to provide: + +1. Registry: The registry is a common catalog with which to explore, develop, collaborate on, and publish new feature definitions within and across teams. It is the central interface for all interactions with the feature store. +2. Ingestion: A means for continually ingesting batch and streaming data and storing consistent copies in both an offline and online store. This layer automates most data-management work and ensures that features are always available for serving. +3. Serving: A feature-retrieval interface which provides a temporally consistent view of features for both training and online serving. Serving improves iteration speed by minimizing coupling to data infrastructure, and prevents training-serving skew through consistent data access. + +Guided by this design, we co-developed and shipped Feast with our friends over at Google. We then open sourced the project in early 2019, and have since been running Feast in production and at scale. In our follow up blog post, [Bridging ML Models and Data](https://blog.gojekengineering.com/feast-bridging-ml-models-and-data), we touched on the impact Feast has had at companies like Gojek. + +## Feast today + +Teams, large and small, are increasingly searching for ways to simplify the productionization and maintenance of their ML systems at scale. 
Since open sourcing Feast, we've seen both the demand for these tools and the activity around this project soar. Working alongside our open source community, we've released key pieces of our stack throughout the last year, and steadily expanded Feast into a robust feature store. Highlights include: + +* Point-in-time correct queries that prevent feature data leakage. +* A query optimized table-based data model in the form of feature sets. +* Storage connectors with implementations for Cassandra and Redis Cluster. +* Statistics generation and data validation through TFDV integration. +* Authentication and authorization support for SDKs and APIs. +* Diagnostic tooling through request/response logging, audit logs, and Statsd integration. + +Feast has grown more rapidly than initially anticipated, with multiple large companies, including Agoda, Gojek, Farfetch, Postmates, and Zulily adopting and/or contributing to the project. We've also been working closely with other open source teams, and we are excited to share that Feast is now a [component in Kubeflow](https://www.kubeflow.org/docs/components/feature-store/). Over the coming months we will be enhancing this integration, making it easier for users to deploy Feast and Kubeflow together. + +## Lessons learned + +Through frequent engagement with our community and by way of running Feast in production ourselves, we've learned critical lessons: + +Feast requires too much infrastructure: Requiring users provision a large system is a big ask. A minimal Feast deployment requires Kafka, Zookeeper, Postgres, Redis, and multiple Feast services. + +Feast lacks composability: Requiring all infrastructural components be present in order to have a functional system removes all modularity. + +Ingestion is too complex: Incorporating a Kafka-based stream-first ingestion layer trivializes data consistency across stores, but the complete ingestion flow from source to sink can still mysteriously fail at multiple points. 
+ +Our technology choices hinder generalization: Leveraging technologies like BigQuery, Apache Beam on Dataflow, and Apache Kafka has allowed us to move faster in delivering functionality. However, these technologies now impede our ability to generalize to other clouds or deployment environments. + +## The future of Feast + +> *"Always in motion is the future."* +> – Yoda, The Empire Strikes Back + +While feature stores have already become essential systems at large technology companies, we believe their widespread adoption will begin in 2021. We also foresee the release of multiple managed feature stores over the next year, as vendors seek to enter the burgeoning operational ML market. + +As we've discussed, feature stores serve both offline and production ML needs, and therefore are primarily built by engineers for engineers. What we need, however, is a feature store that's purpose-built for data-science workflows. Feast will move away from an infrastructure-centric approach toward a more localized experience that does just this: builds on teams' existing data-science workflows. + +The lessons we've learned during the preceding two years have crystallized a vision for what Feast should become: a light-weight modular feature store. One that's easy to pick up, adds value to teams large and small, and can be progressively applied to production use cases that span multiple teams, projects, and cloud-environments. We aim to reach this by applying the following design principles: + +1. Python-first: First-class support for running a minimal version of Feast entirely from a notebook, with all infrastructural dependencies becoming optional enhancements. + * Encourages quick evaluation of the software and ensures Feast is user friendly + * Minimizes the operational burden of running the system in production + * Simplifies testing, developing, and maintaining Feast + +## Next Steps + +Our vision for Feast is not only ambitious, but actionable. 
Our next release, Feast 0.8, is the product of collaborating with both our open source community and our friends over at [Tecton](https://tecton.ai/). + +1. Python-first: We are migrating all core logic to Python, starting with training dataset retrieval and job management, providing a more responsive development experience. +2. Modular ingestion: We are shifting to managing batch and streaming ingestion separately, leading to more actionable metrics, logs, and statistics and an easier to understand and operate system. +3. Support for AWS: We are replacing GCP-specific technologies like Beam on Dataflow with Spark and adding native support for running Feast on AWS, our first steps toward cloud-agnosticism. +4. Data-source integrations: We are introducing support for a host of new data sources (Kinesis, Kafka, S3, GCS, BigQuery) and data formats (Parquet, JSON, Avro), ensuring teams can seamlessly integrate Feast into their existing data-infrastructure. + +## Get involved + +We've been inspired by the soaring community interest in and contributions to Feast. If you're curious to learn more about our mission to build a best-in-class feature store, or are looking to build your own: Check out our resources, say hello, and get involved! diff --git a/docs/blog/announcing-feast-0-11.md b/docs/blog/announcing-feast-0-11.md new file mode 100644 index 00000000000..b3b1f2f4aca --- /dev/null +++ b/docs/blog/announcing-feast-0-11.md @@ -0,0 +1,65 @@ +# Announcing Feast 0.11 + +*June 23, 2021* | *Jay Parthasarthy & Willem Pienaar* + +Feast 0.11 is here! This is the first release after the major changes introduced in Feast 0.10. We've focused on two areas in particular: + +1. Introducing a new online store, Redis, which supports feature serving at high throughput and low latency. +2. Improving the Feast user experience through reduced boilerplate, smoother workflows, and improved error messages. 
A key addition here is the introduction of *feature inferencing,* which allows Feast to dynamically discover data schemas in your source data. + +Let's get into it! + +### Support for Redis as an online store 🗝 + +Feast 0.11 introduces support for Redis as an online store, allowing teams to easily scale up Feast to support high volumes of online traffic. Using Redis with Feast is as easy as adding a few lines of configuration to your feature_store.yaml file: + +```yaml +project: fraud +registry: data/registry.db +provider: local +online_store: + type: redis + connection_string: localhost:6379 +``` + +Feast is then able to read and write from Redis as its online store. + +```bash +$ feast materialize + +Materializing 3 feature views to 2021-06-15 18:43:03+00:00 into the redis online store. + +user_account_features from 2020-06-16 18:43:04 to 2021-06-15 18:43:13: +100%|███████████████████████| 9944/9944 [00:04<00:00, 20065.15it/s] +user_transaction_count_7d from 2021-06-08 18:43:21 to 2021-06-15 18:43:03: +100%|███████████████████████| 9674/9674 [00:04<00:00, 19943.82it/s] +``` + +We're also working on making it easier for teams to add their own storage and compute systems through plugin interfaces. Please see this RFC for more details on the proposal. + +### Feature Inferencing 🔎 + +Before 0.11, users had to define each feature individually when defining Feature Views. Now, Feast infers the schema of a Feature View based on upstream data sources, significantly reducing boilerplate. + +Before: +```python +driver_hourly_stats_view = FeatureView( + name="driver_hourly_stats", + entities=["driver_id"], + ttl=timedelta(days=1), + features=[ + Feature(name="conv_rate", dtype=ValueType.FLOAT), + ], + input=BigQuerySource(table_ref="feast-oss.demo_data.driver_hourly_stats"), +) +``` + +Aside from these additions, a wide variety of small bug fixes, and UX improvements made it into this release. 
[Check out the changelog](https://github.com/feast-dev/feast/blob/master/CHANGELOG.md) for a full list of what's new. + +Special thanks and a big shoutout to the community contributors whose changes made it into this release: [MattDelac](https://github.com/MattDelac), [mavysavydav](https://github.com/mavysavydav), [szalai1](https://github.com/szalai1), [rightx2](https://github.com/rightx2) + +### Help us design Feast for AWS 🗺️ + +The 0.12 release will include native support for AWS. We are looking to meet with teams that are considering using Feast to gather feedback and help shape the product as design partners. We often help our design partners out with architecture or design reviews. If this sounds helpful to you, [join us in Slack](http://slack.feastsite.wpenginepowered.com/), or [book a call with Feast maintainers here](https://calendly.com/d/gc29-y88c/feast-chat-w-willem). + +### Feast from around the web 📣 diff --git a/docs/blog/faster-feature-transformations-in-feast.md b/docs/blog/faster-feature-transformations-in-feast.md new file mode 100644 index 00000000000..689a5b84abd --- /dev/null +++ b/docs/blog/faster-feature-transformations-in-feast.md @@ -0,0 +1,50 @@ +# Faster Feature Transformations in Feast 🏎️💨 + +*December 5, 2024* | *Francisco Javier Arceo, Shuchu Han* + +*Thank you to [Shuchu Han](https://www.linkedin.com/in/shuchu/), [Ross Briden](https://www.linkedin.com/in/ross-briden/), [Ankit Nadig](https://www.linkedin.com/in/ankit-nadig/), and the folks at Affirm for inspiring this work and creating an initial proof of concept.* + +Feature engineering is at the core of building high-performance machine learning models. The Feast team has introduced two major enhancements to [On Demand Feature Views](https://docs.feast.dev/reference/beta-on-demand-feature-views) (ODFVs), pushing the boundaries of efficiency and flexibility for data scientists and engineers. Here's a closer look at these exciting updates: + +## 1. 
Transformations with Native Python + +Traditionally, transformations in ODFVs were limited to Pandas-based operations. While powerful, Pandas transformations can be computationally expensive for certain use cases. Feast now introduces Native Python Mode, a feature that allows users to write transformations using pure Python. + +Key benefits of Native Python Mode include: + +* Blazing Speed: Transformations using Native Python are nearly 10x faster compared to Pandas for many operations. +* Intuitive Design: This mode supports list-based and singleton (row-level) transformations, making it easier for data scientists to think in terms of individual rows rather than entire datasets. +* Versatility: Users can now switch between batch and singleton transformations effortlessly, catering to both historical and online retrieval scenarios. + +Using the cProfile library and snakeviz we were able to profile the runtime for the ODFV transformation using both Pandas and Native Python and observed a nearly 10x reduction in runtime. + +## 2. Transformations on Writes + +Until now, ODFVs operated solely as transformations on reads, applying logic during online feature retrieval. While this ensured flexibility, it sometimes came at the cost of increased latency during retrieval. Feast now supports transformations on writes, enabling users to apply transformations during data ingestion and store the transformed features in the online store. + +Why does this matter? + +* Reduced Online Latency: With transformations pre-applied at ingestion, online retrieval becomes a straightforward lookup, significantly improving performance for latency-sensitive applications. +* Operational Flexibility: By toggling the write_to_online_store parameter, users can choose whether transformations should occur at write time (to optimize reads) or at read time (to preserve data freshness).
 + +Here's an example of applying transformations during ingestion: + +```python +@on_demand_feature_view( + sources=[driver_hourly_stats_view], + write_to_online_store=True, +) +def conv_rate_adjusted(features_df: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["conv_rate_adjusted"] = features_df["conv_rate"] * 1.1 + return df +``` + +With this new capability, data engineers can optimize online retrieval performance without sacrificing the flexibility of on-demand transformations. + +### The Future of ODFVs and Feature Transformations + +These enhancements bring ODFVs closer to the goal of seamless feature engineering at scale. By combining high-speed Python-based transformations with the ability to optimize retrieval latency, Feast empowers teams to build more efficient, responsive, and production-ready feature pipelines. + +For more detailed examples and use cases, check out the [documentation for On Demand Feature Views](https://docs.feast.dev/reference/beta-on-demand-feature-views). Whether you're a data scientist prototyping features or an engineer optimizing a production system, the new ODFV capabilities offer the tools you need to succeed. + +The future of Feature Transformations in Feast will be to unify feature transformations and feature views to allow for a simpler API. If you have thoughts or interest in giving feedback to the maintainers, feel free to comment directly on [the GitHub Issue](https://github.com/feast-dev/feast/issues/4584) or in [the RFC](https://docs.google.com/document/d/1KXCXcsXq1bU...). diff --git a/docs/blog/feast-0-10-announcement.md b/docs/blog/feast-0-10-announcement.md new file mode 100644 index 00000000000..54b10880d14 --- /dev/null +++ b/docs/blog/feast-0-10-announcement.md @@ -0,0 +1,163 @@ +# Announcing Feast 0.10 + +*April 15, 2021* | *Jay Parthasarthy & Willem Pienaar* + +Today, we're announcing Feast 0.10, an important milestone towards our vision for a lightweight feature store. Feast is an open source feature store that helps you serve features in production.
It prevents feature leakage by building training datasets from your batch data, automates the process of loading and serving features in an online feature store, and ensures your models in production have a consistent view of feature data. + +With Feast 0.10, we've dramatically simplified the process of managing a feature store. This new release allows you to: + +* Run a minimal local feature store from your notebook +* Deploy a production-ready feature store into a cloud environment in 30 seconds +* Operate a feature store without Kubernetes, Spark, or self-managed infrastructure + +We think Feast 0.10 is the simplest and fastest way to productionize features. Let's get into it! + +## The challenge with feature stores + +In our previous post, [A State of Feast](https://blog.feastsite.wpenginepowered.com/post), we shared our vision for building a feature store that is accessible to all ML teams. Since then, we've been working towards this vision by shipping support for AWS, Azure, and on-prem deployments. + +Over the last couple of months we've seen a surge of interest in Feast. ML teams are increasingly being tasked with building production ML systems, and many are looking for an open source tool to help them operationalize their feature data in a structured way. However, many of these teams still can't afford to run their own feature stores: + +> "Feature stores are big infrastructure!" + +The conventional wisdom is that feature stores should be built and operated as platforms. It's not surprising why many have this notion. Feature stores require access to compute layers, offline and online databases, and need to directly interface with production systems. + +This infrastructure-centric approach means that operating your own feature store is a daunting task. Many teams simply don't have the resources to deploy and manage a feature store. 
Instead, ML teams are being forced to hack together their own custom scripts or end up delaying their projects as they wait for engineering support. + +## Towards a simpler feature store + +Our vision for Feast is to provide a feature store that a single data scientist can deploy for a single ML project, but can also scale up for use by large platform teams. We've made all infrastructure optional in Feast 0.10. That means no Spark, no Kubernetes, and no APIs, unless you need them. If you're just starting out we won't ask you to deploy and manage a platform. + +Additionally, we've pulled out the core of our software into a single Python framework. This framework allows teams to define features and declaratively provision a feature store based on those definitions, to either local or cloud environments. If you're just starting out with feature stores, you'll only need to manage a Git repository and run the Feast CLI or SDK, nothing more. + +Feast 0.10 introduces a first-class local mode: not installed through Docker containers, but through pip. It allows users to start a minimal feature store entirely from a notebook, allowing for rapid development against sample data and for testing against the same ML frameworks they're using in production. Finally, we've also begun adding first-class support for managed services. Feast 0.10 ships with native support for GCP, with more providers on the way. Platform teams running Feast at scale get the best of both worlds: a feature store that is able to scale up to production workloads by leveraging serverless technologies, with the flexibility to deploy the complete system to Kubernetes if needed. + +## The new experience + +Machine learning teams today are increasingly being tasked with building models that serve predictions online. These teams are also sitting on a wealth of feature data in warehouses like BigQuery, Snowflake, and Redshift. 
It's natural to use these features for model training, but hard to serve these features online at low latency. + +## 1. Create a feature repository + +Installing Feast is now as simple as: +```bash +pip install feast +``` + +We'll scaffold a feature repository based on a GCP template: +```bash +feast init driver_features -t gcp +``` + +A feature repository consists of a *feature_store.yaml*, and a collection of feature definitions. +``` +driver_features/ +└── feature_store.yaml +└── driver_features.py +``` + +The *feature_store.yaml* file contains infrastructural configuration necessary to set up a feature store. The *project* field is used to uniquely identify a feature store, the *registry* is a source of truth for feature definitions, and the *provider* specifies the environment in which our feature store will run. + +feature_store.yaml: +```yaml +project: driver_features +registry: gs://driver-fs/ +provider: gcp +``` + +The feature repository also contains Python based feature definitions, like *driver_features.py*. This file contains a single entity and a single feature view. Together they describe a collection of features in BigQuery that can be used for model training or serving. + +## 2. Set up a feature store + +Next we run *apply* to set up our feature store on GCP. +```bash +feast apply +``` + +Running Feast apply will register our feature definitions with the GCS feature registry and prepare our infrastructure for writing and reading features. Apply can be run idempotently, and is meant to be executed from CI when feature definitions change. + +At this point we haven't moved any data. We've only stored our feature definition metadata in the object store registry (GCS) and Feast has configured our infrastructure (Firestore in this case). + +## 3. Build a training dataset + +Feast is able to build training datasets from our existing feature data, including data at rest in our upstream tables in BigQuery. 
Now that we've registered our feature definitions with Feast we are able to build a training dataset. + +From our training pipeline: +```python +# Connect to the feature registry +fs = FeatureStore( + RepoConfig( + registry="gs://driver-fs/", + project="driver_features" + ) +) + +# Load our driver events table. This dataframe will be enriched with features from BigQuery +driver_events = pd.read_csv("driver_events.csv") + +# Build a training dataset from features in BigQuery +training_df = fs.get_historical_features( + feature_refs=[ + "driver_hourly_stats:conv_rate", + "driver_hourly_stats:acc_rate" + ], + entity_df=driver_events +).to_df() + +# Train a model, and ship it into production +model = ml.fit(training_data) +``` + +The code snippet above will join the user provided dataframe driver_events to our driver_stats BigQuery table in a point-in-time correct way. Feast is able to use the temporal properties (event timestamps) of feature tables to reconstruct a view of features at a specific point in time, from any amount of feature tables or views. + +## 4. Load features into the online store + +At this point we have trained our model and we are ready to serve it. However, our online feature store contains no data. In order to load features into the feature store we run *materialize-incremental* from the command line. + +Feast provides materialization commands that load features from an offline store into an online store. The default GCP provider exports features from BigQuery and writes them directly into Firestore using an in-memory process. Teams running at scale may want to leverage cloud-based ingestion by using a different provider configuration. + +## 5. Read features at low latency + +Now that our online store has been populated with the latest feature data, it's possible for our ML model services to read online features for prediction. 
+ +From our model serving service: +```python +# Connect to the feature store +fs = feast.FeatureStore( + RepoConfig(registry="gs://driver-fs/", project="driver_features") +) + +# Query Firestore for online feature values +online_features = fs.get_online_features( + feature_refs=[ + "driver_hourly_stats:conv_rate", + "driver_hourly_stats:acc_rate" + ], + entity_rows=[{"driver_id": 1001}, {"driver_id": 1002}], +).to_dict() + +# Make a prediction +model.predict(online_features) +``` + +## 6. That's it + +At this point, you can schedule a Feast materialization job and set up our CI pipelines to update our infrastructure as feature definitions change. + +## What's next + +Our vision for Feast is to build a simple yet scalable feature store. With 0.10, we've shipped local workflows, infrastructure pluggability, and removed all infrastructural overhead. But we're still just beginning this journey, and there's still lots of work left to do. + +Over the next few months we will focus on making Feast as accessible to teams as possible. This means adding support for more data sources, streams, and cloud providers, but also means working closely with our users in unlocking new operational ML use cases and integrations. + +Feast is a community driven project, which means extensibility is always a key focus area for us. We want to make it super simple for you to add new data stores, compute layers, or bring Feast to a new stack. We've already seen teams begin development towards community providers for 0.10 during pre-release, and we welcome community contributions in this area. + +The next few months are going to be big ones for the Feast project. Stay tuned for more news, and we'd love for you to get started using Feast 0.10 today! + +## Get started + +* ✨ Try out our [quickstart](https://docs.feastsite.wpenginepowered.com/quickstart) if you're new to Feast, or learn more about Feast through our [documentation](https://docs.feastsite.wpenginepowered.com). 
+* 👋 Join our [Slack](http://slack.feastsite.wpenginepowered.com/) and say hello! Slack is the best forum for you to get in touch with Feast maintainers, and we love hearing feedback from teams trying out 0.10 Feast. +* 📢 Register for [apply()](https://www.applyconf.com/) – the ML data engineering conference, where we'll [demo Feast 0.10](https://www.applyconf.com/agenda/rethinking-feature-stores) and discuss [future developments for AWS](https://www.applyconf.com/agenda/bringing-feast-to-aws). +* 🔥 For teams that want to continue to run Feast on Kubernetes with Spark, have a look at our installation guides and Helm charts. + +🛠️ Thinking about contributing to Feast? Check out our [code on GitHub](https://github.com/feast-dev/feast)! diff --git a/docs/blog/feast-0-13-adds-on-demand-transforms-feature-servers-and-feature-views-without-entities.md b/docs/blog/feast-0-13-adds-on-demand-transforms-feature-servers-and-feature-views-without-entities.md new file mode 100644 index 00000000000..63dd71a52b6 --- /dev/null +++ b/docs/blog/feast-0-13-adds-on-demand-transforms-feature-servers-and-feature-views-without-entities.md @@ -0,0 +1,45 @@ +# Feast 0.13 adds on-demand transforms, feature servers, and feature views without entities + +*October 2, 2021* | *Danny Chiao, Tsotne Tabidze, Achal Shah, and Felix Wang* + +We are delighted to announce the release of [Feast 0.13](https://github.com/feast-dev/feast/releases/tag/v0.13.0), which introduces: + +* [Experimental] On demand feature views, which allow for consistently applied transformations in both training and online paths. This also introduces the concept of request data, which is data only available at the time of the prediction request, as potential inputs into these transformations +* [Experimental] Python feature servers, which allow you to quickly deploy a local HTTP server to serve online features. Serverless deployments and java feature servers to come soon! 
 +* Feature views without entities, which allow you to specify features that should only be joined on event timestamps. You do not need lists of entities / entity values when defining and retrieving features from these feature views. + +Experimental features are subject to API changes in the near future as we collect feedback. If you have thoughts, please don't hesitate to reach out to the Feast team! + +### [Experimental] On demand feature views + +On demand feature views allow users to use existing features and request data to transform and create new features. Users define Python transformation logic which is executed in both historical retrieval and online retrieval paths. This unlocks many use cases including fraud detection and recommender systems, and reduces training / serving skew by allowing for consistently applied transformations. Example features may include: + +* Transactional features such as `transaction_amount_greater_than_7d_average` where the inputs to features are part of the transaction, booking, or order event. +* Features requiring the current location or time such as `user_account_age`, `distance_driver_customer` +* Feature crosses where the keyspace is too large to precompute such as `movie_category_x_movie_rating` or `lat_bucket_x_lon_bucket` + +Currently, these transformations are executed locally. Future milestones include building a feature transformation server for executing transformations at higher scale. + +First, we define the transformations: + +```python +# Define a request data source which encodes features / information only +# available at request time (e.g. part of the user initiated HTTP request) +input_request = RequestDataSource( + name="vals_to_add", + schema={ + "val_to_add": ValueType.INT64, + } +) +``` + +See [On demand feature view](https://docs.feastsite.wpenginepowered.com/reference/on-demand-feature-view) for detailed info on how to use this functionality.
+ +### [Experimental] Python feature server + +The Python feature server provides an HTTP endpoint that serves features from the feature store. This enables users to retrieve features from Feast using any programming language that can make HTTP requests. As of now, it's only possible to run the server locally. A remote serverless feature server is currently being developed. Additionally, a low latency java feature server is in development. + +```bash +$ feast init feature_repo +Creating a new Feast repository in /home/tsotne/feast/feature_repo. +``` diff --git a/docs/blog/feast-0-14-adds-aws-lambda-feature-servers.md b/docs/blog/feast-0-14-adds-aws-lambda-feature-servers.md new file mode 100644 index 00000000000..45062d2d54e --- /dev/null +++ b/docs/blog/feast-0-14-adds-aws-lambda-feature-servers.md @@ -0,0 +1,56 @@ +# Feast 0.14 adds AWS Lambda feature servers + +*October 23, 2021* | *Tsotne Tabidze, Felix Wang* + +We are delighted to announce the release of [Feast 0.14](https://github.com/feast-dev/feast/releases/tag/v0.14.0), which introduces a new feature and several important improvements: + +* [Experimental] AWS Lambda feature servers, which allow you to quickly deploy an HTTP server to serve online features on AWS Lambda. GCP Cloud Run and Java feature servers are coming soon! +* Bug fixes around performance. The core online serving path is now significantly faster. +* Improvements for developer experience. The integration tests are now faster, and temporary tables created during integration tests are immediately dropped after the test. + +Experimental features are subject to API changes in the near future as we collect feedback. If you have thoughts, please don't hesitate to reach out to the Feast team! + +### [Experimental] AWS Lambda feature servers + +Prior to Feast 0.13, the only way for users to retrieve online features was to use the Python SDK. 
This was restrictive, so Feast 0.13 introduced local Python feature servers, allowing users to deploy a local HTTP server to serve their online features. Feast 0.14 now allows users to deploy a feature server on AWS Lambda to quickly serve features at scale. The new AWS Lambda feature servers are available for feature stores using the AWS provider. + +To deploy a feature server to AWS Lambda, it must be enabled and be given the appropriate permissions: + +```yaml +project: dev +registry: s3://feast/registries/dev +provider: aws +online_store: + region: us-west-2 +offline_store: + cluster_id: feast + region: us-west-2 + user: admin + database: feast + s3_staging_location: s3://feast/redshift/tests/staging_location + iam_role: arn:aws:iam::{aws_account}:role/redshift_s3_access_role +flags: + alpha_features: true + aws_lambda_feature_server: true +feature_server: + enabled: True + execution_role_name: arn:aws:iam::{aws_account}:role/lambda_execution_role +``` + +Calling `feast apply` will then deploy the feature server. The precise endpoint can be determined by calling `feast endpoint`, and the endpoint can then be queried as follows: + +See [AWS Lambda feature server](https://docs.feastsite.wpenginepowered.com/reference/feature-servers/aws-lambda) for detailed info on how to use this functionality. + +### Performance bug fixes and developer experience improvements + +The provider for a feature store is now cached instead of being instantiated repeatedly, making the core online serving path 30% faster. + +Integration tests now run significantly faster on GitHub Actions due to caching. Also, tables created during integration tests were previously not always cleaned up properly; now they are always deleted immediately after the integration tests finish.
+ +### What's next + +We are collaborating with the community on supporting streaming sources, low latency serving, a Python feature transformation server for on demand transforms, improved support for Kubernetes deployments, and more. + +In addition, there is active community work on building Hive, Snowflake, Azure, Astra, Presto, and Alibaba Cloud connectors. If you have thoughts on what to build next in Feast, please fill out this [form](https://docs.google.com/forms/d/e/1FAIpQLSfa1nR). + +Download Feast 0.14 today from [PyPI](https://pypi.org/project/feast/) (or pip install feast) and try it out! Let us know on our [slack channel](http://slack.feastsite.wpenginepowered.com/). diff --git a/docs/blog/feast-0-18-adds-snowflake-support-and-data-quality-monitoring.md b/docs/blog/feast-0-18-adds-snowflake-support-and-data-quality-monitoring.md new file mode 100644 index 00000000000..4b4321e3259 --- /dev/null +++ b/docs/blog/feast-0-18-adds-snowflake-support-and-data-quality-monitoring.md @@ -0,0 +1,37 @@ +# Feast 0.18 adds Snowflake support and data quality monitoring + +*February 14, 2022* | *Felix Wang* + +We are delighted to announce the release of Feast [0.18](https://github.com/feast-dev/feast/releases/tag/v0.18.0), which introduces several new features and other improvements: + +* Snowflake offline store, which allows you to define and use features stored in Snowflake. +* [Experimental] Saved Datasets, which allow training datasets to be persisted in an offline store. +* [Experimental] Data quality monitoring, which allows you to validate your training data with Great Expectations. Future work will allow you to detect issues with upstream data pipelines and check for training-serving skew. +* Python feature server graduation from alpha status. +* Performance improvements to on demand feature views, protobuf serialization and deserialization, and the Python feature server. 
+ +Experimental features are subject to API changes in the near future as we collect feedback. If you have thoughts, please don't hesitate to reach out to the Feast team through our [Slack](http://slack.feastsite.wpenginepowered.com/)! + +### Snowflake offline store + +Prior to Feast 0.18, Feast had first-class support for Google BigQuery and AWS Redshift as offline stores. In addition, there were various plugins for Snowflake, Azure, Postgres, and Hive. Feast 0.18 introduces first-class support for Snowflake as an offline store, so users can more easily leverage features defined in Snowflake. The Snowflake offline store can be used with the AWS, GCP, and Azure providers. + +### [Experimental] Saved Datasets + +Training datasets generated via `get_historical_features` can now be persisted in an offline store and reused later. This functionality will be primarily needed to generate reference datasets for validation purposes (see next section) but also could be useful in other use cases like caching results of a computationally intensive point-in-time join. + +### [Experimental] Data quality monitoring + +Feast 0.18 includes the first milestone of our data quality monitoring work. Many users have requested ways to validate their training and serving data, as well as monitor for training-serving skew. Feast 0.18 allows users to validate their training data through an integration with [Great Expectations](https://greatexpectations.io/). Users can declare one of the previously generated training datasets as a reference for this validation by persisting it as a "saved dataset" (see previous section). More details about future milestones of data quality monitoring can be found [here](https://docs.feastsite.wpenginepowered.com/v/master/reference/data-quality). There's also a [tutorial on validating historical features](https://docs.feastsite.wpenginepowered.com/v/master/how-to-guides/validation/validating-historical-features) that demonstrates all new concepts in action. 
+ +### Performance improvements + +The Feast team and community members have made several significant performance improvements. For example, the Python feature server performance was improved by switching to a more efficient serving interface. Improving our protobuf serialization and deserialization logic led to speedups in on demand feature views. The Datastore implementation was also sped up by batching operations. For more details, please see our [blog post](https://feastsite.wpenginepowered.com/blog/feast-benchmarks/) with detailed benchmarks! + +### What's next + +We are collaborating with the community on the first milestone of the `feast plan` command, future milestones of data quality monitoring, and a consolidation of our online serving logic into Golang. + +In addition, there is active community work on adding support for Snowflake as an online store, merging the Azure plugin into the main Feast repo, and more. If you have thoughts on what to build next in Feast, please fill out this [form](https://docs.google.com/forms/d/e/1FAIpQLSfa1nR). + +Download Feast 0.18 today from [PyPI](https://pypi.org/project/feast/) diff --git a/docs/blog/feast-0-20-adds-api-and-connector-improvements.md b/docs/blog/feast-0-20-adds-api-and-connector-improvements.md new file mode 100644 index 00000000000..a15482b6344 --- /dev/null +++ b/docs/blog/feast-0-20-adds-api-and-connector-improvements.md @@ -0,0 +1,41 @@ +# Feast 0.20 adds API and connector improvements + +*April 21, 2022* | *Danny Chiao* + +We are delighted to announce the release of Feast 0.20, which introduces many new features and enhancements: + +* Many connector improvements and bug fixes (DynamoDB, Snowflake, Spark, Trino) + * Note: Trino has been officially bundled into Feast. You can now run this with `pip install "feast[trino]"`! 
+* Feast API changes +* [Experimental] Feast UI as an importable npm module +* [Experimental] Python SDK with embedded Go mode + +### Connector optimizations & bug fixes + +Key changes: + +* DynamoDB online store implementation is now much more efficient with batch feature retrieval (thanks [@TremaMiguel](https://github.com/TremaMiguel)!). As per updates on the [benchmark blog post](https://feastsite.wpenginepowered.com/blog/feast-benchmarks/), DynamoDB now is much more performant at high batch sizes for online feature retrieval! +* Snowflake offline store connector supports key pair authentication. +* Contrib plugins (documentation still pending, but see [old docs](https://github.com/Shopify/feast-trino)) + +### Feast API simplification + +In planning for upcoming functionality (data quality monitoring, batch + stream transformations), certain parts of the Feast API are changing. As part of this change, Feast 0.20 addresses API inconsistencies. No existing feature repos will be broken, and we intend to provide a migration script to help upgrade to the latest syntax. + +Key changes: + +* Naming changes (e.g. `FeatureView` changes from features -> schema) +* All Feast objects will be defined with keyword args (in practice not impacting users unless they use positional args) +* Key Feast object metadata will be consistently exposed through constructors (e.g. owner, description, name) +* [Experimental] Pushing transformed features (e.g. from a stream) directly to the online store: + * Favoring push sources + +### [Experimental] Feast Web UI + +See [https://github.com/feast-dev/feast/tree/master/ui](https://github.com/feast-dev/feast/tree/master/ui) to check out the new Feast Web UI! You can generate registry dumps via the Feast CLI and stand up the server at a local endpoint. You can also embed the UI as a React component and add custom tabs. 
+ +### What's next + +In response to survey results (fill out this [form](https://forms.gle/9SpCeJnq3MayAqHe6) to give your input), the Feast community will be diving much more deeply into data quality monitoring, batch + stream transformations, and more performant / scalable materialization. + +The community is also actively involved in many efforts. Join [#feast-web-ui](https://tectonfeast.slack.com/channels/feast-web-ui) to get involved with helping on the Feast Web UI. diff --git a/docs/blog/feast-benchmarks.md b/docs/blog/feast-benchmarks.md new file mode 100644 index 00000000000..49cd0624ed4 --- /dev/null +++ b/docs/blog/feast-benchmarks.md @@ -0,0 +1,65 @@ +# Serving features in milliseconds with Feast feature store + +*February 1, 2022* | *Tsotne Tabidze, Oleksii Moskalenko, Danny Chiao* + +Feature stores are operational ML systems that serve data to models in production. The speed at which a feature store can serve features can have an impact on the performance of a model and user experience. In this blog post, we show how fast Feast is at serving features in production and describe considerations for deploying Feast. + +## Updates +Apr 19: Updated DynamoDB benchmarks for Feast 0.20 given batch retrieval improvements + +## Background + +One of the most common questions Feast users ask in our [community Slack](http://slack.feastsite.wpenginepowered.com/) is: how scalable / performant is Feast? (spoiler alert: Feast is *very* fast, serving features at <1.5ms @p99 when using Redis in the below benchmarks) + +In a survey conducted last year ([results](https://docs.google.com/forms/d/e/1FAIpQLScV2RX)), we saw that most users were tackling challenging problems like recommender systems (e.g. recommending items to buy) and fraud detection, and had strict latency requirements. + +Over 80% of survey respondents needed features to be read at less than 100ms (@p99). 
Taking into account that most users in this survey were supporting recommender systems, which often require ranking 100s-1000s of entities simultaneously, this becomes even more strict. Feature serving latency grows with batch size because each request must fetch features for many random entities, amplifying the impact of tail latency.
All options communicate with the same Feast registry component (managed by `feast apply`) to understand where features are stored.
+ +The Beta Feast Java feature server with Redis provides very low latency retrieval (p99 < 1.5ms for single row retrieval of 250 features), but at increased architectural complexity, less first class support for functionality (e.g. no SQL registry support), and more overhead in managing Redis clusters. Using a Python server with other managed online stores like DynamoDB or Datastore is easier to manage. + +Note: there are managed services for Redis like Redis Enterprise Cloud which remove the additional complexity associated with managing Redis clusters and provide additional benefits. + +### What's next + +The community is always improving Feast performance, and we'll post updates to performance improvements in the future. Future improvements in the works include: + +* Improved on demand transformation performance +* Improved pooling of clients (e.g. we've seen that caching Google clients significantly improves response times and reduces memory consumption) diff --git a/docs/blog/feast-joins-the-linux-foundation-for-ai-data.md b/docs/blog/feast-joins-the-linux-foundation-for-ai-data.md new file mode 100644 index 00000000000..19af5632b8a --- /dev/null +++ b/docs/blog/feast-joins-the-linux-foundation-for-ai-data.md @@ -0,0 +1,37 @@ +# Feast Joins The Linux Foundation for AI & Data + +*January 22, 2021* | *Christina Harter* + +([Original post](https://lfaidata.foundation/blog/2020/11/10/feast-joins-lf-ai-data-as-new-incubation-project/)) + +LF AI & Data Foundation—the organization building an ecosystem to sustain open source innovation in artificial intelligence (AI), machine learning (ML), deep learning (DL), and Data open source projects—today is announcing FEAST as its latest Incubation Project. [Feast](https://feastsite.wpenginepowered.com/) (Feature Store) is an open source feature store for machine learning. + +Today, teams running operational machine learning systems are faced with many technical and organizational challenges: + +1. 
Models don't have a consistent view of feature data and are tightly coupled to data infrastructure. +2. Deploying new features in production is difficult. +3. Feature leakage decreases model accuracy. +4. Features aren't reused across projects. +5. Operational teams can't monitor the quality of data served to models. + +Developed collaboratively between [Gojek](https://www.gojek.com/) and [Google Cloud](https://cloud.google.com/) in 2018, Feast was open sourced in early 2019. The project sets out to address these challenges as follows: + +1. Providing a single data access layer that decouples models from the infrastructure used to generate, store, and serve feature data. +2. Decoupling the creation of features from the consumption of features through a centralized store, thereby allowing teams to ship features into production with minimal engineering support. +3. Providing point-in-time correct retrieval of feature data for both model training and online serving. +4. Encouraging reuse of features by allowing organizations to build a shared foundation of features. +5. Providing data-centric operational monitoring that ensures operational teams can run production machine learning systems confidently at scale. + +"Feast was created to address the data challenges we faced at Gojek while scaling machine learning for ride-hailing, food delivery, digital payments, fraud detection, and a myriad of other use cases" said Willem Pienaar, creator of Feast. "After open sourcing the project we've seen an explosion of demand for the software, leading to strong adoption and community growth. Entering the LF AI & Data Foundation is an important step for us toward decentralized governance and wider industry adoption and development." 
+ +Jeremy Lewi, Kubeflow founder, said "Feast entering the LF AI & Data Foundation is both a major milestone for the project and recognition of the strides the project has made toward solving some of the hardest problems in productionizing data for machine learning. Technologies like Feast have the potential to shape the machine learning stack of the future, and with its incubation in LF AI & Data, the project now has the ideal environment to expand its community in building a best-in-class open source feature store." + +Dr. Ibrahim Haddad, Executive Director of LF AI & Data, said: "We are very excited to welcome FEAST to LF AI & Data and help it thrive in a vendor-neutral environment under an open governance model. With the addition of FEAST, we are increasing the number of hosted projects under the Data category and look forward to tighter collaboration between our data projects and all other projects to drive innovation in data, analytics, and AI open source technologies." + +LF AI & Data supports projects via a wide range of services, and the first step is joining as an Incubation Project. LF AI & Data will support the neutral open governance for FEAST to help foster the growth of the project. Check out the [Documentation](https://docs.feastsite.wpenginepowered.com/) to start working with FEAST today. Learn more about FEAST on their [GitHub](https://github.com/feast-dev/feast) and be sure to join the [FEAST-Announce](https://lists.lfaidata.foundation/g/feast-announce) and [FEAST-Technical-Discuss](https://lists.lfaidata.foundation/g/feast-technical-discuss) mail lists to join the community and stay connected on the latest updates. + +A warm welcome to FEAST! We look forward to the project's continued growth and success as part of the LF AI & Data Foundation. To learn about how to host an open source project with us, visit the [LF AI & Data website](https://lfaidata.foundation/proposal-and-hosting/). 
+ +FEAST Key Links: +* [Website](https://feastsite.wpenginepowered.com/) +* [GitHub](https://github.com/feast-dev/feast) diff --git a/docs/blog/feast-release-0-12-adds-aws-redshift-and-dynamodb-stores.md b/docs/blog/feast-release-0-12-adds-aws-redshift-and-dynamodb-stores.md new file mode 100644 index 00000000000..c79008caaf8 --- /dev/null +++ b/docs/blog/feast-release-0-12-adds-aws-redshift-and-dynamodb-stores.md @@ -0,0 +1,46 @@ +# Feast 0.12 adds AWS Redshift and DynamoDB stores + +*August 11, 2021* | *Jules S. Damji, Tsotne Tabidze, and Achal Shah* + +We are delighted to announce [Feast 0.12](https://github.com/feast-dev/feast/blob/master/CHANGELOG.md) is released! With this release, Feast users can take advantage of AWS technologies such as Redshift and DynamoDB as feature store backends to power their machine learning models. We want to share three key additions that extend Feast's ecosystem and facilitate a convenient way to group features via a Feature Service for serving: + +1. Adding [AWS Redshift](https://aws.amazon.com/redshift/), a cloud data warehouse, as an offline store, which supports features serving for training and batch inference at high throughput + +Let's briefly take a peek at each and how easily you can use them through simple declarative APIs and configuration changes. + +### AWS Redshift as a feature store data source and an offline store + +Redshift data source allows you to fetch historical feature values from Redshift for building training datasets and materializing features into an online store (see below how to materialize). A data source is defined as part of the [Feast Declarative API](https://rtd.feastsite.wpenginepowered.com/en/latest/) in the feature repo directory's Python files. For example, `aws_datasource.py` defines a table from which we want to fetch features. 
To enable DynamoDB as your online store, just change `feature_store.yaml`:
We are pleased to announce that Feast now supports (as an experimental feature in Alpha) embedding vector features for popular GenAI use-cases such as RAG (retrieval augmented generation). + +An important consideration is that GenAI applications using embedding vectors stand to benefit from a formal feature framework, just as traditional ML applications do. We are excited about adding support for embedding vector features because of the opportunity to improve GenAI backend operations. The integration of embedding vectors as features into Feast, allows GenAI developers to take advantage of MLOps best practices, lowering development time, improving quality of work, and sets the stage for [Retrieval Augmented Fine Tuning](https://techcommunity.microsoft.com/t5/ai-ai-platform-blog/retrieval-augmented-fine-tuning-raft-with-azure-ai/ba-p/3979114). + +## Setting Up a Document Embedding Feature View + +The [feast-workshop repo example](https://github.com/feast-dev/feast-workshop/tree/main) shows how Feast users can define feature views with vector database sources. They can easily convert text queries to embedding vectors, which are then matched against a vector database to retrieve closest vector records. All of this works seamlessly within the Feast toolset, so that vector features become a natural addition to the Feast feature store solution. + +Defining a feature backed by a vector database is very similar to defining other types of features in Feast. Specifically, we can use the FeatureView class with an Array type field. 
We welcome community contributions in this area — if you have any thoughts, feel free to join the conversation on GitHub.
diff --git a/docs/blog/go-feature-server-benchmarks.md b/docs/blog/go-feature-server-benchmarks.md new file mode 100644 index 00000000000..1bd2539cab2 --- /dev/null +++ b/docs/blog/go-feature-server-benchmarks.md @@ -0,0 +1,55 @@ +# Go feature server benchmarks + +*July 19, 2022* | *Felix Wang* + +## Background + +The Feast team published a [blog post](https://feastsite.wpenginepowered.com/blog/feast-benchmarks/) several months ago with latency benchmarks for all of our online feature retrieval options. Since then, we have built a Go feature server. It is currently in alpha mode, and only supports Redis as an online store. The docs are [here](https://docs.feastsite.wpenginepowered.com/reference/feature-servers/go-feature-server/). We recommend teams that require extremely low-latency feature serving to try the Go feature server. To test it, we ran our benchmarks against it; the results are presented below. + +## Benchmark Setup + +See [https://github.com/feast-dev/feast-benchmarks](https://github.com/feast-dev/feast-benchmarks) for the exact benchmark code. The feature servers were deployed in Docker on AWS EC2 instances (c5.4xlarge, 16vCPU, 64GiB memory). + +## Data and query patterns + +Feast's feature retrieval primarily manages retrieving the latest values of a given feature for specified entities. In this benchmark, the online stores contain: + +* 25 feature views (with 10 features per feature view) for a total of 250 features +* 1M entity rows + +As described in [RFC-031](https://docs.google.com/document/d/12UuvTQnTTCJ), we simulate different query patterns by additionally varying by number of entity rows in a request (i.e. *batch size*), requests per second, and the concurrency of the feature server. The goal here is to have numbers that apply to a diverse set of teams, regardless of their scale and typical query patterns. Users are welcome to extend the benchmark suite to better test their own setup. 
+ +## Online store setup + +These benchmarks only used Redis as an online store. We used a single Redis server, run locally with Docker Compose on an EC2 instance. This should closely approximate usage of a separate Redis server in AWS. Typical network latency within the same availability zone in AWS is [< 1-2 ms](https://aws.amazon.com/blogs/architecture/improving-performance-and-reducing-cost-using-availability-zone-affinity/). In these benchmarks, we did not hit limits that required use of a Redis cluster. With higher batch sizes, the benchmark suite would likely only work with Redis clusters. Redis clusters should improve Feast's performance. + +## Benchmark Results + +### Summary + +* The Go feature server is very fast (e.g. p99 latency is ~3.9 ms for a single row fetch of 250 features) +* For the same number of features and batch size, the Go feature server is about 3-5x faster than the Python feature server + * Despite this, there are still compelling reasons to use Python, depending on your situation (e.g. simplicity of deployment) +* Feature server latency… + * scales linearly (moderate slope) with batch size + * scales linearly (low slope) with number of features + * does not substantially change as requests per seconds increase + +### Latency when varying by batch size + +For this comparison, we check retrieval of 50 features across 5 feature views. At p99, we see that Go significantly outperforms Python, by ~3-5x. It also scales much better with batch size. 
|------------|---|----|----|----|----|----|----|----|----|----|-----|
Bridging gaps between these teams was a two-way challenge: product teams needed help applying learnings from research teams, and research teams needed to be convinced to take on projects from the product space. + +In addition, it was difficult to share data from enterprise Google products with research teams due to security and privacy mandates. Danny's experience working on multiple ML products and interfacing between diverse stakeholder groups would later prove to be highly valuable in his role in the Feast open source community. + +What prompted Danny to leave Google and join Tecton? He noticed how the ML landscape outside of Google was starting to look very different from how it did internally. While Google was still using ETL jobs to read data from data lakes or databases and perform massive transformations, other companies were taking advantage of new data warehouse technologies: "I was hearing that the ecosystem for iterating, developing, and shipping models was oddly enough more mature outside of Google…Internally, a lot of these massive systems are dependent on the infrastructure, so you can't iterate as quickly." + +Excited by the innovations in ML infrastructure that were appearing in the broader community, Danny moved to Tecton to work on [Feast](https://www.tecton.ai/blog/feast-announcement/), an open-source feature store. [Feature stores](https://www.tecton.ai/blog/what-is-a-feature-store/) act as a central hub for feature data across an ML project's lifecycle, and are responsible for transforming raw data into features, storing and managing features, and serving features for training and prediction. Feature stores are quickly becoming a critical infrastructure for data science teams putting ML into production. + +## What it's like to work in the Feast open source community + +As a leader in the Feast community, Danny splits his time between engineering projects and community engagement. 
There are also diverse use cases for Feast, from recommender systems, to fraud detection, to credit scoring, to biotech.
+ +True to the vision of keeping Feast simple, the team is focused on targeting new users in the ML space and getting them from zero-to-one. This is the plan for a world where machine learning is becoming even more ubiquitous. "It's going to become something that is just expected of companies," Demetrios said. "Right now, it doesn't feel like we've even gotten at 2% of what is potentially possible if every single business is going to be using machine learning." Fortunately, feature stores are a technology that can dramatically shorten the time it takes a new company to begin realizing value from machine learning. + +From meeting the machine learning needs of a broad user base to helping new teams get started with ML, there's a lot of exciting work to be done at Feast! You can learn more about the Feast project on our [website](https://www.tecton.ai/feast/), or read updates in Danny's community newsletter on the [Feast google group](https://groups.google.com/g/feast-dev/). diff --git a/docs/blog/kubeflow-and-feast-with-david-aronchick.md b/docs/blog/kubeflow-and-feast-with-david-aronchick.md new file mode 100644 index 00000000000..ca8ec914696 --- /dev/null +++ b/docs/blog/kubeflow-and-feast-with-david-aronchick.md @@ -0,0 +1,31 @@ +# Kubeflow + FEAST With David Aronchick, Co-creator of Kubeflow + +*April 29, 2022* | *demetrios* + +A recent episode of *The Feast Podcast* featured the co-creator of [Kubeflow](https://www.kubeflow.org/), David Aronchick, along with hosts Willem Pienaar and Demetrios Brinkmann. David, Willem, and Demetrios talked about the complexities of setting up machine learning (ML) infrastructure today and what's needed in the future to improve this process. You can read about the highlights from the podcast below or listen to the full episode [here](https://anchor.fm/featurestore/episodes/Kubeflo...). 
+ +## Creation and philosophy behind Kubeflow + +[Kubeflow](https://www.kubeflow.org/) is a project that improves the deployment process of ML workflows on [Kubernetes](https://kubernetes.io/), a system for managing containers. It's an open-source platform originally based on Google's internal method to deploy [TensorFlow](https://www.tensorflow.org/) models, and is available for public use. It can deploy systems everywhere that Kubernetes is supported: e.g. on-premise installations, Google Cloud, AWS, and Azure. + +For machine learning practitioners, training is usually done in one of two ways. If the data set is small, users typically work in a Jupyter notebook, which allows them to quickly iterate on the necessary parameters without having to do much manual setup. On the other hand, if the data set is very large, distributed training is required with many physical or virtual machines. + +Originally, Kubeflow started as a way to connect the two worlds, so one could start with a Jupyter notebook and then move into distributed training with more features, pipelines, and feature stores as the data set grows. By itself, Kubeflow did not provide these additional capabilities, but wanted to partner with a service that did — hence, the beginning of a great collaboration with [Feast](https://www.tecton.ai/feast/). David described how Kubeflow is built on a mix of services: Kubeflow defines the pipeline language, Feast provides the feature store, [Argo](https://argoproj.github.io/workflows/) does work under the hood, [Katib](https://www.kubeflow.org/docs/components/katib/...) provides a hyperparameter sweep, and [Seldon](https://www.kubeflow.org/docs/external-add-ons/...) provides an inference endpoint. As Kubeflow becomes more mature, the goal is to restructure from a monolithic infrastructure where many services are installed at once to become more clean and specialized, so users only install the services they need. 
Currently we can see that happening with the graduation of KServe. + +## Improving the collaboration between data scientists and software engineers + +Next, David discussed how data scientists and software engineers work together to build and deploy ML systems. Data scientists fine tune the parameters of the model while engineers work on productionizing the model — that is, making sure it runs smoothly without interruptions. Unfortunately, the production deploy process cannot be fully automated yet. One of the core problems is that the APIs for ML systems are complicated to use, which is a hindrance to data scientists. + +A lot of work in ML is closer to science, where hypotheses are made and tested, as opposed to software development, where there is an iterative process and new versions are always being shipped. If you start building a distributed model based on a large data set, it may be hard or impossible to work in an interactive notebook like Jupyter unless a completely new, smaller, model is created. + +The general process for ML practitioners is a pipeline, but the individual steps are often not clearly described so it is difficult to map each step to the correct tool for the job. A data scientist's daily work can often look like downloading a CSV, deleting a column of data, uploading it to a feature store, running a Python script, and then doing training. Willem stated the need for a better solution: "Small groups should be able to independently deploy solutions for specific use cases that solve business problems." David wants to make this pipeline easier to perform with existing tools: "While there certainly are components of that available in Kubeflow and Kubernetes and others, I'm thinking about what that next abstraction layer looks like and it should be available shortly." + +## What's needed to accelerate the industry + +The landscape of ML operations platforms is very complex. 
There are several infrastructure options out there: Kubernetes was chosen as the backbone of Kubeflow because it's simple to set up and tweak. Willem talked about the consolidation of ML and ML operations tools: "It's going to happen eventually because there's just too many out there, and they're not all going to make it. Right now, it's the breeding grounds, and then it's going to be survival of the fittest." We can already see this playing out with DataRobot acquiring Algorithmia, Snowflake purchasing Streamlit and a few days ago, Databricks buying Cortex Labs. + +For open-source projects like Kubeflow, there should be a working group around core components that establishes standards. It isn't necessary to have one person who makes all of the decisions in this space. If a new feature is needed, code discussions are 10% of the problem but the majority of the work is around deciding implementation details and making sure that it works. The fastest way to get something done is just to build it yourself and try to get it merged. + +David mentioned that to really improve the ecosystem for ML, we "need to develop not just a standard layer for describing the entire platform, but also a system that describes many of the common objects in machine learning: a feature, a feature store, a data set, a training run, an experiment, a serving inference, and so on. It will spur innovation because it defines a set of clear contracts that users can produce and consume." Currently, this is hard to do programmatically because the variety of systems means that auxiliary tools need to be written to connect data sets. + +If expanding the future of ML infrastructure sounds exciting to you, there's a lot of contributions that are needed! You can learn more about [Feast](https://www.tecton.ai/feast/), the feature store connected to Kubeflow, and start using it today. Jump in our [slack](http://slack.feastsite.wpenginepowered.com/) and say hi! 
diff --git a/docs/blog/machine-learning-data-stack-for-real-time-fraud-prediction-using-feast-on-gcp.md b/docs/blog/machine-learning-data-stack-for-real-time-fraud-prediction-using-feast-on-gcp.md new file mode 100644 index 00000000000..729787dc4a2 --- /dev/null +++ b/docs/blog/machine-learning-data-stack-for-real-time-fraud-prediction-using-feast-on-gcp.md @@ -0,0 +1,50 @@ +# Machine learning data stack for real-time fraud detection using Feast on GCP + +*September 8, 2021* | *Jay Parthasarthy and Jules S. Damji* + +A machine learning (ML) model decides whether your transaction is blocked or approved every time you purchase using your credit card. Fraud detection is a canonical use case for real-time ML. Predictions are made upon each request quickly while you wait at a point of sale for payment approval. + +Even though this is a common problem with ML, companies often build custom tooling to tackle these predictions. Like most ML problems, the hard part of fraud prediction is in the data. The fundamental data challenges are the following: + +1. Some data needed for prediction is available as part of the transaction request. This data is the easy part of passing to the model. +2. Other data (for example, a user's historical purchases) provides a high signal for predictions, but it isn't available as part of the transaction request. This data takes time to look up: it's stored in a batch system like a data warehouse. This data is challenging to fetch since it requires a system to handle many queries per second (QPS). +3. Together, they comprise ML features as signals to the model for predicting whether the requested transaction is fraudulent. + +[Feast](https://feastsite.wpenginepowered.com/) is an open-source feature store that helps teams use batch data for real-time ML applications. 
It's used as part of fraud [prediction and other high-volume transactions systems](https://www.youtube.com/watch?v=ED81DvicQuQ) to prevent fraud for billions of dollars worth of transactions at companies like [Gojek](https://www.gojek.com/en-id/) and [Postmates](https://postmates.com/). In this blog, we discuss how we can use Feast to build a stack for fraud predictions. You can also follow along on Google Cloud Platform (GCP) by running this [Colab tutorial notebook](https://colab.research.google.com/github/feast-dev/feast-fraud-tutorial).
+
+## Generic data stack for fraud detection
+
+Here's what a generic stack for fraud prediction looks like:
+
+## 1. Generating batch features from data sources
+
+The first step in deploying an ML model is to generate features from raw data stored in an offline system, such as a data warehouse (DWH) or a modern data lake. After that, we use these features in our ML model for training and inference. But before we get into the specifics of fraud detection related to our example below, let's quickly understand some high-level concepts.
+
+Data sources: This data repository records all historical transactions data for a user, account information, and any indication of user fraud history. Usually, it's a data warehouse (DWH) with respective tables. The diagram above shows that features are generated from these data sources and put into another offline store (or the same store). Using transformational queries, like SQL, this data, joined from multiple tables, could be injected or stored as another table in a DWH — refined and computed as features.
+
+Features used: In the fraud use case, one set of the raw data is a record of historical transactions. This record includes data about the transaction:
+* Amount of transaction
+* Timestamp when the event occurred
+* User account information
+
+## 3. Materialize features to low-latency online stores
+
+We have a model that's ready for real-time inference. 
However, we won't be able to make predictions in real-time if we need to fetch or compute data out of the data warehouse on each request because it's slow. + +Feast allows you to make real-time predictions based on warehouse data by materializing it into an [online store](https://docs.feastsite.wpenginepowered.com/concepts/registry). Using the Feast CLI, you can incrementally materialize your data, from the current time on since the previous materialized data: + +```bash +feast materialize-incremental $(date -u +"%Y-%m-%dT%H:%M:%S") +``` + +With our feature values loaded into the online store, a low-latency key-value store, as shown in the diagram above, we can retrieve new data when a new transaction request arrives in our system. + +Note that the feast materialize-incremental command needs to be run regularly so that the online store can continue to contain fresh feature values. We suggest that you integrate this command into your company's scheduler (e.g., Airflow.) + +## Conclusion + +In summation, we outlined a general data stack for real-time fraudulent prediction use cases. We implemented an end-to-end fraud prediction system using [Feast on GCP](https://github.com/feast-dev/feast-fraud-tutorial) as part of our tutorial. + +We'd love to hear how your organization's setup differs. This setup roughly corresponds to the most common patterns we've seen from our users, but things are usually more complicated as teams introduce feature logging, streaming features, and operational databases. + +You can bootstrap a simple stack illustrated in this blog by running our [tutorial notebook on GCP](https://colab.research.google.com/github/feast-dev/feast-fraud-tutorial). From there, you can integrate your prediction service into your production application and start making predictions in real-time. We can't wait to see what you build with Feast, and please share with the [Feast community](http://slack.feastsite.wpenginepowered.com/). 
diff --git a/docs/blog/performance-test-for-python-based-feast-feature-server.md b/docs/blog/performance-test-for-python-based-feast-feature-server.md new file mode 100644 index 00000000000..f9ef252e932 --- /dev/null +++ b/docs/blog/performance-test-for-python-based-feast-feature-server.md @@ -0,0 +1,61 @@ +# Performance Test for the Python-Based Feast Feature Server: Comparison Between DataStax Astra DB (Based on Apache Cassandra), Google Datastore & Amazon DynamoDB + +*April 17, 2023* | *Stefano Lottini* + +## Introduction + +Feature stores are an essential part of the modern stack around machine learning (ML); in particular, the effort aimed at rationalizing the access patterns to the features associated with ML models by the various functions revolving around it (from data engineers to data scientists). At its core, a feature store provides a layer atop a persistent data store (a database) that facilitates shared access to the features associated with the entities belonging to a business domain, making it easier to retrieve them consistently for both training and prediction. + +Out of several well-established feature stores available today, the most popular open-source solution is arguably [Feast](https://feastsite.wpenginepowered.com/). With its active base of contributors and support for a growing list of backends to choose from, ML practitioners don't have to worry about the boilerplate setup of their data system and can focus on delivering their product—all while retaining the freedom to choose the backend that best suits their needs. + +Last year, the Feast team published [extensive benchmarks](https://feastsite.wpenginepowered.com/blog/feast-benchmarks/) comparing the performance of the feature store when using different storage layers for retrieval of "online" features (that is, up-to-date reads to calculate inferences, as opposed to batch or historical "offline" reads). 
The storage backends used in the test, each powered by its own Feast plugin, were: Redis (running locally), Google Datastore, and Amazon DynamoDB—the latter on the same cloud region as the testing client. The main takeaways were: + +* Redis yields the lowest response times (but at a cost; see below) +* Among the cloud DB vendors, DynamoDB is noticeably faster than Datastore +* Latencies increase with the number of features needed and, albeit less so, with the number of rows ("entities") + +Moreover, Feast offers an SDK for both Java and Python. Although choosing the Java stack for the feature server results in faster responses, the vast majority of Feast users work with a Python-centered stack. So in our tests, we'll focus on Python start-to-end setups. + +Surveys done by Feast also showed that more than 60% of the interviewees required that P99 latency stay below 50 ms. These ultra-low-latency ML use cases often fall in the [fraud detection](https://www.tecton.ai/blog/how-to-build-a-fraud-detection-ml-system/) and [recommender system](https://www.tecton.ai/blog/guide-to-building-online-recommender-systems/) categories. + +## Feature stores & Cassandra / Astra DB + +The need for persistent data stores is ubiquitous in any ML application—and it comes in all sizes and shapes, of which the "feature store" pattern is but a certain, albeit very common, instance. As is discussed at length in the Feast blog post, many factors influence the architectural choices for ML-based systems. Besides serving latency, there are considerations about fault tolerance and data redundancy, ease of use, pricing, ease of integration with the rest of the stack, and so forth. For example, in some cases, it may be convenient to employ an in-memory store such as Redis, trading data durability and ease of scaling for reduced response times. 
+
+In this [recently published guide](https://planetcassandra.org/post/practitioners-guide-to-cassandra-for-ml/), the author highlights the fact that a feature store lies at the core of most ML-centered architectures, possibly (and, looking forward, more and more so) augmented with real-time capabilities owing to a combination of CDC (Change Data Capture), event-streaming technologies, and sometimes in-memory cache layers. The guide makes the case that Cassandra and DataStax's cloud-based DBaaS [Astra DB](https://astra.datastax.com/) (which is built on Cassandra) are great databases to build a feature store on top of, owing to the world-class fault tolerance, 100% uptime, and extremely low latencies it can offer out of the box.
+
+We then set out to extend the performance measurements to Astra DB, with the intent to provide hard data corroborating our claim that Cassandra and Astra DB are performant first-class choices for an online feature store. In other words, once the plugin made its way to Feast, we took the next logical step: running the very same testing already done for the other DBaaS choice, but this time on Astra DB. The next section reports on our findings.
+
+## Performance benchmarks for Feast on Astra DB
+
+The Feast team published a GitHub [repository](https://github.com/feast-dev/feast-benchmarks) with the code used for the benchmarks. We added coverage for Astra DB (plus a one-node Cassandra cluster running locally, serving the purpose of a functional test) and upgraded the Feast version used in all benchmarks to use v0.26 consistently.
+
+*Note: The original tests used v0.20 for DynamoDB, v0.17 for Datastore and v0.21 for Redis. 
Because we reproduced all pre-existing benchmarks, finding most values to be in acceptable agreement (see below for more remarks on this point), we are confident that upgrading the Feast version does not significantly alter the performance.* + +The tests have been run on comparable AWS and GCP machines (respectively c5.4xlarge and c2-standard-16 instances) running in the same region as the cloud database (thereby mimicking the desired architecture for a production system). We did not change any benchmark parameter in order to keep the comparison meaningful, even with prior results. As stated earlier, we focused on the Python feature server, which has a wider adoption among the Feast community and supports a broader ecosystem of plugins. + +Here's how we conducted the benchmarking. First, a moderate amount of synthetic "feature data" (10k entities with 250 integer features each, for a total of about 11 MB) was materialized to the online store. Then various one-minute test runs were performed, each with a certain choice of feature-retrieval parameters, all while collecting statistics (in particular, high percentiles) on the response time of these retrieval operations. The parameters that varied between runs were: + +* batch size (1 to 100 entities per request) + +Let's go back to the Cassandra plugin for Feast and examine some properties of how it was structured. + +First, one might notice that, regardless of which features are requested at runtime, the whole partition (i.e., all features for a given entity) is read. This was chosen to avoid using IN clauses when querying Cassandra; these are indeed discouraged unless the number of values is very, very small (as a rule of thumb, less than half a dozen). 
Moreover, since one does not know at write-time which features will be read together, there is no preferred way to arrange the clustering column(s) to have these features grouped together in the partition (as done, for example, with Facebook's ["feature re-ordering"](https://engineering.fb.com/2022/09/19/ml-applications/feature-store-announcement/) which purportedly results in a 30%-70% latency reduction). A reasonable compromise was then to always read the whole partition and apply client-side post-query filtering to avoid burdening the query coordinators with additional work—at the cost, of course, of increased network throughput. + +Second, when features from multiple entities are needed, the plugin makes good use of the execute_concurrently_with_args primitive offered by the Cassandra Python driver, thereby spawning one thread per partition and firing all requests at once (up to a maximum concurrency threshold, which can be configured). This leverages the excellent support for concurrency by the Cassandra architecture, which accounts for the observed moderate dependency of latencies on the batch size. + +## Conclusion + +We put the Cassandra plugin for Feast to test in the same way as other DBaaS plugins were tested; that is, using the Astra DB cloud database built on Cassandra, and we ran the same benchmarks that were applied to Redis, Datastore, and DynamoDB. + +Besides broadly confirming the previous results published by the Feast team, our main finding is that the performance with Astra DB is on par with that of AWS DynamoDB and noticeably better than that of Google Datastore. + +All these tests target the Python implementation. As mentioned in the Feast article, switching to a Java feature server greatly improves the performance, but requires a more convoluted setup and architecture and overall more expertise both for setup and maintenance. 
+ +Other evidence points to the fact that, *if one is mainly concerned about performance*, replacing any feature store with a direct-to-DB implementation may be the best choice. In this regard, our extensive investigations clearly make the case that Cassandra is a good fit for ML applications, regardless of whether a feature store is involved or not. + +Some results might be made statistically stronger by more extensive tests, which could be a task for a future iteration of these performance benchmarks. It is possible that longer runs and/or much larger amounts of stored data would better highlight the underlying patterns in how the response times behave as a function of batch size and/or number of requested features. + +## Acknowledgements + +The author would like to thank Alan Ho, Scott Regan, and Jonathan Shook for a critical reading of this manuscript, and the Feast team for a pleasant and fruitful collaboration around the development (first) and the benchmarking (afterwards) of the Cassandra / Astra DB plugin for the namesake feature store. diff --git a/docs/blog/rbac-role-based-access-controls.md b/docs/blog/rbac-role-based-access-controls.md new file mode 100644 index 00000000000..683dd9c6940 --- /dev/null +++ b/docs/blog/rbac-role-based-access-controls.md @@ -0,0 +1,71 @@ +# Feast Launches Role Based Access Control (RBAC)! 🚀 + +*November 21, 2024* | *Daniele Martinoli, Francisco Javier Arceo* + +Feast is proud to introduce Role-Based Access Control (RBAC), a game-changing feature for secure and scalable feature store management. With RBAC, administrators can define granular access policies, ensuring each team has the appropriate permissions to access and manage only the data they need. Built on Kubernetes RBAC and OpenID Connect (OIDC), this powerful model enhances data governance, fosters collaboration, and makes Feast a trusted solution for teams handling sensitive, proprietary data. + +## What is the Feast Permission Model? 
+ +Feast now supports Role Based Access Controls (RBAC) so you can secure and govern your data. If you ever wanted to securely partition your feature store across different teams, the new Feast permissions model is here to make that possible! + +This powerful feature allows administrators to configure granular authorization policies, letting them decide which users and groups can access specific resources and what operations they can perform. + +The default implementation is based on Role-Based Access Control (RBAC): user roles determine whether a user has permission to perform specific functions on registered resources. + +## Why is RBAC important for Feast? + +Feature stores often operate on sensitive, proprietary data and we want to make sure teams are able to govern the access and control of that data thoughtfully, while benefiting from transparent code and an open source community like Feast. + +That's why we built RBAC using [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) and [OpenID Connect protocol (OIDC)](https://auth0.com/docs/authenticate/protocols/openid-connect), ensuring secure, fine-grained access control in Feast. + +## What are the Benefits of using Feast Permissions? + +Using the Feast Permissions Model offers two key benefits: + +1. Securely share and partition your feature store: grant each team only the minimum privileges necessary to access and manage the relevant resources. +2. Adopt a Service-Oriented Architecture and leverage the benefits of a distributed system. + +## How Feast Uses RBAC + +### Permissions as Feast resources + +The RBAC configuration is defined using a new Feast object type called "Permission". Permissions are registered in the Feast registry and are defined and applied like all the other registry objects, using Python code. + +A permission is defined by these three components: + +* A resource: a Feast object that we want to secure against unauthorized access. 
It's identified by the matching type(s), a possibly empty list of name patterns and a dictionary of required tags. +* An action: a logical operation performed on the secured resource, such as managing the resource state with CREATE, DESCRIBE, UPDATE or DELETE, or accessing the resource data with READ and WRITE (differentiated by ONLINE and OFFLINE store types) +* A policy: the rule to enforce authorization decisions based on the current user. The default implementation uses role-based policies. + +The resource types supported by the permission framework are those defining the customer feature store: + +* Project +* Entity +* Clients use the feature store transparently, with authorization headers automatically injected in every request. +* Service-to-service communications are permitted automatically. + +Currently, only the following Python servers are supported in an authorized environment: +- Online REST feature server +- Offline Arrow Flight feature server +- gRPC Registry server + +### Configuring Feast Authorization + +For backward compatibility, by default no authorizations are enforced. The authorization functionality must be explicitly enabled using the auth configuration section in feature_store.yaml. Of course, all server and client applications must have a consistent configuration. + +Currently, feast supports [OIDC](https://auth0.com/docs/authenticate/protocols/openid-connect) and [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) authentication/authorization. + +* With OIDC authorization, the client uses an OIDC server to fetch a JSON Web Token (JWT), which is then included in every request. On the server side, the token is parsed to extract user roles and validate them against the configured permissions. +* With Kubernetes authorization, the client injects its service account JWT token into the request. 
The server then extracts the service account name from the token and uses it to look up the associated role in the Kubernetes RBAC resources. + +### Inspecting and Troubleshooting the Permissions Model + +The feast CLI includes a new permissions command to list the registered permissions, with options to identify the matching resources for each configured permission and the existing resources that are not covered by any permission. + +For troubleshooting purposes, it also provides a command to list all the resources and operations allowed to any managed role. + +## How Can I Get Started? + +This new feature includes working examples for both supported authorization protocols. You can start by experimenting with these examples to see how they fit your own feature store and assess their benefits. + +As this is a completely new functionality, your feedback will be extremely valuable. It will help us adapt the feature to meet real-world requirements and better serve our customers. diff --git a/docs/blog/streaming-feature-engineering-with-denormalized.md b/docs/blog/streaming-feature-engineering-with-denormalized.md new file mode 100644 index 00000000000..ace3ec2df6c --- /dev/null +++ b/docs/blog/streaming-feature-engineering-with-denormalized.md @@ -0,0 +1,135 @@ +# Streaming Feature Engineering with Denormalized + +*December 17, 2024* | *Matt Green* + +Learn how to use Feast with [Denormalized](https://www.denormalized.io/) + +Thank you to [Matt Green](https://www.linkedin.com/in/mgreen9/) and [Francisco Javier Arceo](https://www.linkedin.com/in/franciscojavierarceo) for their contributions! + +## Introduction + +Feature stores have become a critical component of the modern AI stack where they serve as a centralized repository for model features. Typically, they consist of both an offline store for aggregating large amounts of data while training models, and an online store, which allows for low latency delivery of specific features when running inference. 
+
+A popular open source example is [Feast](https://feast.dev/), which allows users to store features together by ingesting data from different data sources. While Feast allows you to define features and query data stores using those definitions, it relies on external systems to calculate and update online features. This post will demonstrate how to use [denormalized](https://www.denormalized.io/) to build real-time feature pipelines.
+
+The full working example is available at the [feast-dev/feast-denormalized-tutorial](https://github.com/feast-dev/feast-denormalized-tutorial) repo. Instructions for configuring and running the example can be found in the README file.
+
+## The Problem
+
+Fraud detection is a classic example of a model that uses real-time features. Imagine you are building a model to detect fraudulent user sessions. One feature you would be interested in is the number of login attempts made by a user and how many of those were successful. You could calculate this feature by looking back in time over a sliding interval (AKA "a sliding window"). If you notice a large amount of failed login attempts over the previous 5 seconds, you might infer the account is being brute-forced and choose to invalidate the session and lock the account.
+
+To simulate this scenario, we wrote a simple script that emits fake login events to a Kafka cluster: [session_generator](https://github.com/feast-dev/feast-denormalized-tutorial).
+
+This script will emit JSON events according to the following schema:
+
+```python
+@dataclass
+class AuthAttempt:
+    timestamp: datetime
+    user_id: str
+    ip_address: str
+    success: bool
+```
+
+## Configuring the Feature Store with Feast
+
+Before we can start writing our features, we need to first configure the feature store. Feast makes this easy using a Python API. In Feast, features are referred to as Fields and are grouped into FeatureViews. FeatureViews have corresponding PushSources for ingesting data from online sources (i.e., we can push data to Feast). 
We also define an offline data store using the FileSource class, though we won't be using that in this example. + +```python +file_sources = [] +push_sources = [] +feature_views = [] + +for i in [1, 5, 10, 15]: + file_source = FileSource( + path=str(Path(__file__).parent / f"./data/auth_attempt_{i}.parquet"), + timestamp_field="timestamp", + ) + file_sources.append(file_source) + + push_source = PushSource( + name=f"auth_attempt_push_{i}", + batch_source=file_source, + ) + push_sources.append(push_source) + + feature_views.append( + FeatureView( + name=f"auth_attempt_view_w{i}", + entities=[auth_attempt], + schema=[ + Field(name="user_id", dtype=feast_types.String,), + Field(name="timestamp", dtype=feast_types.String,), + Field(name=f"{i}_success", dtype=feast_types.Int32,), + Field(name=f"{i}_total", dtype=feast_types.Int32,), + Field(name=f"{i}_ratio", dtype=feast_types.Float32,), + ], + source=push_source, + online=True, + ) + ) +``` + +The code creates 4 different FeatureViews each containing their own features. As discussed previously, fraud features can be calculated over a sliding interval. It can be useful to not only look at recent failed authentication attempts but also the aggregate of attempts made over longer time intervals. This could be useful when trying to detect things like credential testing which can happen over a longer period of time. + +In our example, we're creating 4 different FeatureViews that will ultimately be populated by 4 different window lengths. This can help our model detect various types of attacks over different time intervals. Before we can use our features, we'll need to run `feast apply` to set-up the online datastore. + +## Writing the Pipelines with Denormalized + +Now that we have our online data store configured, we need to write our data pipelines for computing the features. Simply speaking, these pipelines need to: + +1. Read messages from Kafka +2. Aggregate those messages over a varied timeframe +3. 
Write the resulting aggregate value to the feature store
+
+Denormalized makes this really easy. First, we create our DataStream object from a Context():
+
+```python
+ds = FeastDataStream(
+    Context().from_topic(
+        config.kafka_topic,
+        feature_service, f"auth_attempt_push_{config.feature_prefix}"
+    )
+)
+```
+
+This will start the Denormalized Rust stream processing engine, which is powered by DataFusion so it's ultra-fast.
+
+## Running Multiple Pipelines
+
+The write_feast_feature() method is a blocking call that continuously executes one pipeline to produce a set of features across a single sliding window. If we want to calculate features using different sliding window lengths, we will need to configure and start multiple pipelines. We can easily do this using the multiprocessing library in Python:
+
+```python
+for window_length in [1, 5, 10, 15]:
+    config = PipelineConfig(
+        window_length_ms=window_length * 1000,
+        slide_length_ms=1000,
+        feature_prefix=f"{window_length}",
+        kafka_bootstrap_servers=args.kafka_bootstrap_servers,
+        kafka_topic=args.kafka_topic,
+    )
+    process = multiprocessing.Process(
+        target=run_pipeline,
+        args=(config,),
+        name=f"PipelineProcess-{window_length}",
+        daemon=False,
+    )
+    processes.append(process)
+
+for p in processes:
+    try:
+        p.start()
+    except Exception as e:
+        logger.error(f"Failed to start process {p.name}: {e}")
+        cleanup_processes(processes)
+        return
+```
+
+For each group of features we defined earlier, we spin up a different system process with a different window length. Each process will then execute its own instance of the Denormalized stream processing engine, which has its own thread pools for effective parallelization of work.
+
+While this example demonstrates how you can easily run multiple Denormalized pipelines, in a production environment, you'd probably want each pipeline running in its own container. 
+ +## Final Thoughts + +We've demonstrated how you can easily create real-time features using Feast and Denormalized. While working with streaming data can be a challenge, modern python libraries backed by fast native code are making it easier than ever to quickly iterate on model inputs. + +Denormalized is currently in the early stages of development. If you have any feedback or questions, feel free to reach out at [hello@denormalized.io](mailto:hello@denormalized.io). diff --git a/docs/blog/the-future-of-feast.md b/docs/blog/the-future-of-feast.md new file mode 100644 index 00000000000..f08547a1bf7 --- /dev/null +++ b/docs/blog/the-future-of-feast.md @@ -0,0 +1,38 @@ +# The Future of Feast + +*February 23, 2024* | *Willem Pienaar* + +AI has taken center stage with the rise of large language models, but production ML systems remain the lifeblood of most AI powered companies today. At the heart of these products are feature stores like Feast, serving real-time, batch, and streaming data points to ML models. I'd like to spend a moment taking stock of what we've accomplished over the last six years and what the growing Feast community has to look forward to. + +## Act 1: Gojek and Google + +Feast was started in 2018 as a [collaboration](https://cloud.google.com/blog/products/ai-machine-learning/introducing-feast-an-open-source-feature-store-for-machine-learning) between Gojek and our friends at Google Cloud. The primary motivation behind the project was to rein in the rampant duplication of feature engineering across the Southeast Asian decacorn's many ML teams. + +Almost immediately, the key challenge with feature stores became clear: Can it be generalized across various ML use cases? + +The natural way to answer that question is to battle test the software out in the open. So in late 2018, spurred on by our friends in the Kubeflow project, we open sourced Feast. A community quickly formed around the project. 
This group was mostly made up of software engineers at data rich technology companies, trying to find a way to help their ML teams productionize models at a much higher pace. + +Having a community centric approach is in the DNA of the project. All of our RFCs, discussions, designs, community calls, and code are open source. The project became a vehicle for ML platform teams globally to collaborate. Many teams saw the project as a means of stress testing their internal feature store designs, while others like Agoda, Zulily, Farfetch, and Postmates adopted the project wholesale and became core contributors. + +As time went by the demand grew for the project to have neutral ownership and formal governance. This led to us [entering the project into the Linux Foundation for AI in 2020](https://lfaidata.foundation/blog/2020/11/10/feast-joins-lf-ai-data-as-new-incubation-project/). + +## Act 2: Rise of the Feature Store + +By 2020, the demand for feature stores had reached a fever pitch. If you were dealing with more than just an Excel sheet of data, you were likely planning to either build or buy a feature store. A category formed around feature stores and MLOps. Being a neutrally governed open source project brought in a raft of contributions, which helped the project generalize not just to different data platforms and vendors, but also different use cases and deployment patterns. A few of the highlights include: + +* We worked closely with AI teams at [Snowflake](https://quickstarts.snowflake.com/guide/getting_started_with_feast/), [Azure](https://techcommunity.microsoft.com/t5/ai-customer-engineering-team/using-feast-feature-store-with-azure-ml/ba-p/2908404) + +It's also important to mention that by far the biggest contributor to Feast was [Tecton](https://www.tecton.ai/), who invested considerable resources into the project and helped create the category. + +Today, the project is battle hardened and stable. 
It's seen adoption and/or contribution from companies like Adyen, Affirm, Better, Cloudflare, Discover, Experian, Lowes, Red Hat, Robinhood, Palo Alto Networks, Porch, Salesforce, Seatgeek, Shopify, and Twitter, just to name a few. + +## Act 3: The Road to 1.0 + +The rate of change in AI has accelerated, and nowhere is it moving faster than in open source. Keeping up with this rate of change for AI infra requires the best minds, so with that we'd like to introduce a set of contributors who will be graduating to official project maintainers: + +* [Francisco Javier Arceo](https://www.linkedin.com/in/franciscojavierarceo/) – Engineering Manager, [Affirm](https://www.affirm.com/) +* [Hao Xu](https://www.linkedin.com/in/hao-xu-a04436103/) – Lead Software Engineer, J.P. Morgan + +Over the next few months these maintainers will focus on bringing the project to a major 1.0 release. In our next post we will take a closer look at what the road to 1.0 looks like. + +If you'd like to get involved, try out the project [over at GitHub](https://github.com/feast-dev/feast) or join our [Slack](https://feastopensource.slack.com) community! diff --git a/docs/blog/the-road-to-feast-1-0.md b/docs/blog/the-road-to-feast-1-0.md new file mode 100644 index 00000000000..27be07aadea --- /dev/null +++ b/docs/blog/the-road-to-feast-1-0.md @@ -0,0 +1,35 @@ +# The road to Feast 1.0 + +*February 28, 2024* | *Edson Tirelli* + +### Past Achievements and a Bright Future + +In the [previous blog](https://feast.dev/blog/the-future-of-feast/) we recapped Feast's journey over the last 6 years and hinted about what is coming in the future. We also announced a new group of maintainers that joined the project to help drive it to the 1.0 milestone. Today, we will drill down a little bit into the goals for the project towards that milestone. 
+ +### The Goals for Feast 1.0 + +* Tighter Integration with [**Kubeflow**](https://www.kubeflow.org/): Recognizing the growing importance of Kubernetes in the ML workflow, a primary objective is to achieve a closer integration with [Kubeflow](https://www.kubeflow.org/). This will enable smoother workflows and enhanced scalability for ML projects. + +* Development of Enterprise Features: With the aim to make Feast more robust for enterprise usage, we are focusing on developing features that cater to the complex needs of large-scale organizations. These include advanced security measures, scalability enhancements, and improved data management capabilities. + +* Graduation from [**LF AI and Data Foundation Incubation**](https://landscape.lfai.foundation/?selected=feast): Currently incubating under the [LF AI and Data Foundation](https://landscape.lfai.foundation/?selected=feast), we are setting our sights on graduating Feast to become a fully-fledged project under the foundation. This step will mark a significant milestone in our journey, recognizing the maturity and stability of Feast. + +* Research and Development for Novel Use Cases: Keeping pace with the rapidly evolving ML landscape (e.g., Large Language Models and Retrieval Augmented Generation), we are committed to exploring new research areas. Our aim is to adapt Feast to support novel use cases, keeping it at the forefront of technology. + +* Support for Latest ML Model Advancements: As ML models become more sophisticated, Feast will evolve to support these advancements. This includes accommodating new model architectures and training techniques. + +This new phase is not just about setting goals but laying down a concrete roadmap to achieve Feast version 1.0. This version will encapsulate all our efforts towards making Feast more integrated, enterprise-ready, and aligned with the latest ML advancements. + +### Why Invest in Feast? 
+ +Many industry applications of machine learning require intensely sophisticated data pipelines. Over the last decade, the data infrastructure and analytics community collaborated together to build powerful frameworks like dbt that enabled analytics to flourish. We believe Feast can do the same for the machine learning community–particularly those that spend most of their time on data pipelining and feature engineering. We believe Feast is a core foundation in the future of machine learning and we will build it to offer a standard set of patterns that will enable ML Engineering and ML Ops teams to leverage those patterns and industry best practices to avoid common pitfalls, while (1) offering the flexibility of choosing their own infrastructure and (2) providing ML Practitioners with a Python-based interface. + +### In Conclusion + +This transition marks a pivotal moment in Feast's journey. We are excited about the opportunities and challenges ahead. With the support of the ML community, the dedication of our new maintainers, and the clear vision set by our steward committee, Feast is poised to reach new heights and continue to be a pivotal tool in the ML ecosystem. + +We invite everyone to join us in this exciting journey and contribute to the future of Feast. Together, let's shape the next chapter in the evolution of feature stores and machine learning. + +For updates and discussions, join our [Slack channel](http://feastopensource.slack.com/) and follow our [GitHub repository](https://github.com/feast-dev/feast/). + +*This post reflects the collective vision and aspirations of the new Feast steward committee. 
For more detailed discussions and contributions, please reach out to us on our [community channels](https://docs.feast.dev/community).* diff --git a/docs/blog/what-is-a-feature-store.md b/docs/blog/what-is-a-feature-store.md new file mode 100644 index 00000000000..720d6ab1ab2 --- /dev/null +++ b/docs/blog/what-is-a-feature-store.md @@ -0,0 +1,85 @@ +# What is a Feature Store? + +*January 21, 2021* | *Willem Pienaar & Mike Del Balso* + +Blog co-authored with Mike Del Balso, Co-Founder and CEO of Tecton, and cross-posted [here](https://www.tecton.ai/blog/what-is-a-feature-store/) + +Data teams are starting to realize that operational machine learning requires solving data problems that extend far beyond the creation of data pipelines. In [Why We Need DevOps for ML Data](https://www.tecton.ai/blog/devops-ml-data/), Tecton highlighted some of the key data challenges that teams face when productionizing ML systems: + +* Accessing the right raw data +* Building features from raw data +* Combining features into training data +* Calculating and serving features in production +* Monitoring features in production + +Production data systems, whether for large scale analytics or real-time streaming, aren't new. However, *operational machine learning* — ML-driven intelligence built into customer-facing applications — is new for most teams. The challenge of deploying machine learning to production for operational purposes (e.g. recommender systems, fraud detection, personalization, etc.) introduces new requirements for our data tools. + +A new kind of ML-specific data infrastructure is emerging to make that possible. Increasingly Data Science and Data Engineering teams are turning towards feature stores to manage the data sets and data pipelines needed to productionize their ML applications. 
This post describes the key components of a modern feature store and how the sum of these parts act as a force multiplier on organizations, by reducing duplication of data engineering efforts, speeding up the machine learning lifecycle, and unlocking a new kind of collaboration across data science teams. + +Quick refresher: in ML, a feature is data used as an input signal to a predictive model. For example, if a credit card company is trying to predict whether a transaction is fraudulent, a useful feature might be *whether the transaction is happening in a foreign country*, or *how the size of this transaction compares to the customer's typical transaction*. When we refer to a feature, we're usually referring to the concept of that signal (e.g. "transaction_in_foreign_country"), not a specific value of the feature (e.g. not "transaction #1364 was in a foreign country"). + +## Enter the feature store + +*"The interface between models and data"* + +We first introduced feature stores in our blog post describing Uber's [Michelangelo](https://eng.uber.com/michelangelo-machine-learning-platform/) platform. Feature stores have since emerged as a necessary component of the operational machine learning stack. + +Feature stores make it easy to: +1. Productionize new features without extensive engineering support +2. Automate feature computation, backfills, and logging +3. Share and reuse feature pipelines across teams +4. Track feature versions, lineage, and metadata +5. Achieve consistency between training and serving data +6. Monitor the health of feature pipelines in production + +Feature stores aim to solve the full set of data management problems encountered when building and operating operational ML applications. 
A feature store is an ML-specific data system that: + +* Runs data pipelines that transform raw data into feature values +* Stores and manages the feature data itself, and +* Serves feature data consistently for training and inference purposes + +Feature stores bring economies of scale to ML organizations by enabling collaboration. When a feature is registered in a feature store, it becomes available for immediate reuse by other models across the organization. This reduces duplication of data engineering efforts and allows new ML projects to bootstrap with a library of curated production-ready features. + +## Components of a Feature Store + +There are 5 main components of a modern feature store: Transformation, Storage, Serving, Monitoring, and Feature Registry. + +In the following sections we'll give an overview of the purpose and typical capabilities of each of these sections. + +## Serving + +Models need access to fresh feature values for inference. Feature stores accomplish this by regularly recomputing features on an ongoing basis. Transformation jobs are orchestrated to ensure new data is processed and turned into fresh new feature values. These jobs are executed on data processing engines (e.g. Spark or Pandas) to which the feature store is connected. + +Model development introduces different transformation requirements. When iterating on a model, new features are often engineered to be used in training datasets that correspond to historical events (e.g. all purchases in the past 6 months). To support these use cases, feature stores make it easy to run "backfill jobs" that generate and persist historical values of a feature for training. Some feature stores automatically backfill newly registered features for preconfigured time ranges for registered training datasets. + +Transformation code is reused across environments preventing training-serving skew and frees teams from having to rewrite code from one environment to the next. 
Feature stores manage all feature-related resources (compute, storage, serving) holistically across the feature lifecycle. Automating repetitive engineering tasks needed to productionize a feature, they enable a simple and fast path-to-production. Management optimizations (e.g. retiring features that aren't being used by any models, or deduplicating feature transformations across models) can bring significant efficiencies, especially as teams grow and the complexity of managing features manually increases. + +## Monitoring + +When something goes wrong in an ML system, it's usually a data problem. Feature stores are uniquely positioned to detect and surface such issues. They can calculate metrics on the features they store and serve that describe correctness and quality. Feature stores monitor these metrics to provide a signal of the overall health of an ML application. + +Feature data can be validated based on user defined schemas or other structural criteria. Data quality is tracked by monitoring for drift and training-serving skew. E.g. feature data served to models are compared to data on which the model was trained to detect inconsistencies that could degrade model performance. + +When running production systems, it's also important to monitor operational metrics. Feature stores track operational metrics relating to core functionality. E.g. metrics relating to feature storage (availability, capacity, utilization, staleness) or feature serving (throughput, latency, error rates). Other metrics describe the operations of important adjacent system components. For example, operational metrics for external data processing engines (e.g. job success rate, throughput, processing lag and rate). + +Feature stores make these metrics available to existing monitoring infrastructure. This allows ML application health to be monitored and managed with existing observability tools in the production stack. 
Having visibility into which features are used by which models, feature stores can automatically aggregate alerts and health metrics into views relevant to specific users, models, or consumers. + +It's not essential that all feature stores implement such monitoring internally, but they should at least provide the interfaces into which data quality monitoring systems can plug. Different ML use cases can have different, specialized monitoring needs so pluggability here is important. + +## Registry + +A critical component in all feature stores is a centralized registry of standardized feature definitions and metadata. The registry acts as a single source of truth for information about a feature in an organization. + +The registry is a central interface for user interactions with the feature store. Teams use the registry as a common catalog to explore, develop, collaborate on, and publish new definitions within and across teams. + +The registry allows for important metadata to be attached to feature definitions. This provides a route for tracking ownership, project or domain specific information, and a path to easily integrate with adjacent systems. This includes information about dependencies and versions which is used for lineage tracking. + +To help with common debugging, compliance, and auditing workflows, the registry acts as an immutable record of what's available analytically and what's actually running in production. + +## Where to go to get started + +We see feature stores as the heart of the data flow in modern ML applications. They are quickly proving to be [critical infrastructure](https://a16z.com/2020/10/15/the-emerging-architectures-of-modern-data/) for data science teams putting ML into production. We expect 2021 to be a year of massive feature store adoption, as machine learning becomes a key differentiator for technology companies. 
+ +There are a few options for getting started with feature stores: + +* [Feast](https://feastsite.wpenginepowered.com/) is a great option if you already have transformation pipelines to compute your features, but need a great storage and serving layer to help you use them in production. Feast is GCP/AWS only today, but we're working hard to make Feast available as a light-weight feature store for all environments. Stay tuned. diff --git a/docs/getting-started/architecture/push-vs-pull-model.md b/docs/getting-started/architecture/push-vs-pull-model.md index b205e97fc51..f1bd05a3e75 100644 --- a/docs/getting-started/architecture/push-vs-pull-model.md +++ b/docs/getting-started/architecture/push-vs-pull-model.md @@ -25,4 +25,4 @@ Implicit in the Push model are decisions about _how_ and _when_ to push feature From a developer's perspective, there are three ways to push feature values to the online store with different tradeoffs. -They are discussed further in the [Write Patterns](getting-started/architecture/write-patterns.md) section. +They are discussed further in the [Write Patterns](write-patterns.md) section. diff --git a/docs/getting-started/architecture/write-patterns.md b/docs/getting-started/architecture/write-patterns.md index 4674b5504d3..f92b4e9d83b 100644 --- a/docs/getting-started/architecture/write-patterns.md +++ b/docs/getting-started/architecture/write-patterns.md @@ -1,6 +1,6 @@ # Writing Data to Feast -Feast uses a [Push Model](getting-started/architecture/push-vs-pull-model.md) to push features to the online store. +Feast uses a [Push Model](push-vs-pull-model.md) to push features to the online store. This has two important consequences: (1) communication patterns between the Data Producer (i.e., the client) and Feast (i.e,. the server) and (2) feature computation and _feature value_ write patterns to Feast's online store. 
diff --git a/docs/getting-started/components/README.md b/docs/getting-started/components/README.md index e1c000abced..1b224056298 100644 --- a/docs/getting-started/components/README.md +++ b/docs/getting-started/components/README.md @@ -12,6 +12,10 @@ [online-store.md](online-store.md) {% endcontent-ref %} +{% content-ref url="feature-server.md" %} +[feature-server.md](feature-server.md) +{% endcontent-ref %} + {% content-ref url="batch-materialization-engine.md" %} [batch-materialization-engine.md](batch-materialization-engine.md) {% endcontent-ref %} @@ -23,3 +27,7 @@ {% content-ref url="authz_manager.md" %} [authz_manager.md](authz_manager.md) {% endcontent-ref %} + +{% content-ref url="open-telemetry.md" %} +[open-telemetry.md](open-telemetry.md) +{% endcontent-ref %} diff --git a/docs/getting-started/components/feature-server.md b/docs/getting-started/components/feature-server.md new file mode 100644 index 00000000000..4d961054ecb --- /dev/null +++ b/docs/getting-started/components/feature-server.md @@ -0,0 +1,42 @@ +# Feature Server + +The Feature Server is a core architectural component in Feast, designed to provide low-latency feature retrieval and updates for machine learning applications. + +It is a REST API server built using [FastAPI](https://fastapi.tiangolo.com/) and exposes a limited set of endpoints to serve features, push data, and support materialization operations. The server is scalable, flexible, and designed to work seamlessly with various deployment environments, including local setups and cloud-based systems. + +## Motivation + +In machine learning workflows, real-time access to feature values is critical for enabling low-latency predictions. The Feature Server simplifies this requirement by: + +1. **Serving Features:** Allowing clients to retrieve feature values for specific entities in real-time, reducing the complexity of direct interactions with the online store. +2. 
**Data Integration:** Providing endpoints to push feature data directly into the online or offline store, ensuring data freshness and consistency. +3. **Scalability:** Supporting horizontal scaling to handle high request volumes efficiently. +4. **Standardized API:** Exposing HTTP/JSON endpoints that integrate seamlessly with various programming languages and ML pipelines. +5. **Secure Communication:** Supporting TLS (SSL) for secure data transmission in production environments. + +## Architecture + +The Feature Server operates as a stateless service backed by two key components: + +- **[Online Store](./online-store.md):** The primary data store used for low-latency feature retrieval. +- **[Registry](./registry.md):** The metadata store that defines feature sets, feature views, and their relationships to entities. + +## Key Features + +1. **RESTful API:** Provides standardized endpoints for feature retrieval and data pushing. +2. **CLI Integration:** Easily managed through the Feast CLI with commands like `feast serve`. +3. **Flexible Deployment:** Can be deployed locally, via Docker, or on Kubernetes using Helm charts. +4. **Scalability:** Designed for distributed deployments to handle large-scale workloads. +5. **TLS Support:** Ensures secure communication in production setups. + +## Endpoints Overview + +| Endpoint | Description | +|------------------------------|-------------------------------------------------------------------------| +| `/get-online-features` | Retrieves feature values for specified entities and feature references. | +| `/push` | Pushes feature data to the online and/or offline store. | +| `/materialize` | Materializes features within a specific time range to the online store. | +| `/materialize-incremental` | Incrementally materializes features up to the current timestamp. 
| +| `/retrieve-online-documents` | Supports Vector Similarity Search for RAG (Alpha endpoint) | +| `/docs` | API Contract for available endpoints | + diff --git a/docs/getting-started/components/offline-store.md b/docs/getting-started/components/offline-store.md index 48470c6547a..c04773e66b4 100644 --- a/docs/getting-started/components/offline-store.md +++ b/docs/getting-started/components/offline-store.md @@ -8,7 +8,7 @@ Offline stores are primarily used for two reasons: 1. Building training datasets from time-series features. 2. Materializing \(loading\) features into an online store to serve those features at low-latency in a production setting. -Offline stores are configured through the [feature\_store.yaml](../../reference/offline-stores/). +Offline stores are configured through the [feature\_store.yaml](../../reference/feature-repository/feature-store-yaml.md). When building training datasets or materializing features into an online store, Feast will use the configured offline store with your configured data sources to execute the necessary data operations. Only a single offline store can be used at a time. diff --git a/docs/getting-started/components/open-telemetry.md b/docs/getting-started/components/open-telemetry.md new file mode 100644 index 00000000000..bdffad1d27b --- /dev/null +++ b/docs/getting-started/components/open-telemetry.md @@ -0,0 +1,149 @@ +# OpenTelemetry Integration + +The OpenTelemetry integration in Feast provides comprehensive monitoring and observability capabilities for your feature serving infrastructure. This component enables you to track key metrics, traces, and logs from your Feast deployment. + +## Motivation + +Monitoring and observability are critical for production machine learning systems. The OpenTelemetry integration addresses these needs by: + +1. **Performance Monitoring:** Track CPU and memory usage of feature servers +2. **Operational Insights:** Collect metrics to understand system behavior and performance +3. 
**Troubleshooting:** Enable effective debugging through distributed tracing +4. **Resource Optimization:** Monitor resource utilization to optimize deployments +5. **Production Readiness:** Provide enterprise-grade observability capabilities + +## Architecture + +The OpenTelemetry integration in Feast consists of several components working together: + +- **OpenTelemetry Collector:** Receives, processes, and exports telemetry data +- **Prometheus Integration:** Enables metrics collection and monitoring +- **Instrumentation:** Automatic Python instrumentation for tracking metrics +- **Exporters:** Components that send telemetry data to monitoring systems + +## Key Features + +1. **Automated Instrumentation:** Python auto-instrumentation for comprehensive metric collection +2. **Metric Collection:** Track key performance indicators including: + - Memory usage + - CPU utilization + - Request latencies + - Feature retrieval statistics +3. **Flexible Configuration:** Customizable metric collection and export settings +4. **Kubernetes Integration:** Native support for Kubernetes deployments +5. **Prometheus Compatibility:** Integration with Prometheus for metrics visualization + +## Setup and Configuration + +To add monitoring to the Feast Feature Server, follow these steps: + +### 1. Deploy Prometheus Operator +Follow the [Prometheus Operator documentation](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md) to install the operator. + +### 2. Deploy OpenTelemetry Operator +Before installing the OpenTelemetry Operator: +1. Install `cert-manager` +2. Validate that the `pods` are running +3. Apply the OpenTelemetry operator: +```bash +kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml +``` + +For additional installation steps, refer to the [OpenTelemetry Operator documentation](https://github.com/open-telemetry/opentelemetry-operator). 
+ +### 3. Configure OpenTelemetry Collector +Add the OpenTelemetry Collector configuration under the metrics section in your values.yaml file: + +```yaml +metrics: + enabled: true + otelCollector: + endpoint: "otel-collector.default.svc.cluster.local:4317" # sample + headers: + api-key: "your-api-key" +``` + +### 4. Add Instrumentation Configuration +Add the following annotations and environment variables to your deployment.yaml: + +```yaml +template: + metadata: + annotations: + instrumentation.opentelemetry.io/inject-python: "true" +``` + +```yaml +- name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://{{ .Values.service.name }}-collector.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.metrics.endpoint.port}} +- name: OTEL_EXPORTER_OTLP_INSECURE + value: "true" +``` + +### 5. Add Metric Checks +Add metric checks to all manifests and deployment files: + +```yaml +{{ if .Values.metrics.enabled }} +apiVersion: opentelemetry.io/v1alpha1 +kind: Instrumentation +metadata: + name: feast-instrumentation +spec: + exporter: + endpoint: http://{{ .Values.service.name }}-collector.{{ .Release.Namespace }}.svc.cluster.local:4318 + env: + propagators: + - tracecontext + - baggage + python: + env: + - name: OTEL_METRICS_EXPORTER + value: console,otlp_proto_http + - name: OTEL_LOGS_EXPORTER + value: otlp_proto_http + - name: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED + value: "true" +{{end}} +``` + +### 6. Add Required Manifests +Add the following components to your chart: +- Instrumentation +- OpenTelemetryCollector +- ServiceMonitors +- Prometheus Instance +- RBAC rules + +### 7. Deploy Feast +Deploy Feast with metrics enabled: + +```bash +helm install feast-release infra/charts/feast-feature-server --set metrics.enabled=true --set feature_store_yaml_base64="" +``` + +## Usage + +To enable OpenTelemetry monitoring in your Feast deployment: + +1. Set `metrics.enabled=true` in your Helm values +2. Configure the OpenTelemetry Collector endpoint +3. 
Deploy with proper annotations and environment variables + +Example configuration: +```yaml +metrics: + enabled: true + otelCollector: + endpoint: "otel-collector.default.svc.cluster.local:4317" +``` + +## Monitoring + +Once configured, you can monitor various metrics including: + +- `feast_feature_server_memory_usage`: Memory utilization of the feature server +- `feast_feature_server_cpu_usage`: CPU usage statistics +- Additional custom metrics based on your configuration + +These metrics can be visualized using Prometheus and other compatible monitoring tools. diff --git a/docs/getting-started/components/overview.md b/docs/getting-started/components/overview.md index ac0b99de8ab..05c7503d842 100644 --- a/docs/getting-started/components/overview.md +++ b/docs/getting-started/components/overview.md @@ -13,6 +13,7 @@ * **Deploy Model:** The trained model binary (and list of features) are deployed into a model serving system. This step is not executed by Feast. * **Prediction:** A backend system makes a request for a prediction from the model serving service. * **Get Online Features:** The model serving service makes a request to the Feast Online Serving service for online features using a Feast SDK. +* **Feature Retrieval:** The online serving service retrieves the latest feature values from the online store and returns them to the model serving service. ## Components @@ -24,6 +25,7 @@ A complete Feast deployment contains the following components: * Materialize (load) feature values into the online store. * Build and retrieve training datasets from the offline store. * Retrieve online features. +* **Feature Server:** The Feature Server is a REST API server that serves feature values for a given entity key and feature reference. The Feature Server is designed to be horizontally scalable and can be deployed in a distributed manner. 
* **Stream Processor:** The Stream Processor can be used to ingest feature data from streams and write it into the online or offline stores. Currently, there's an experimental Spark processor that's able to consume data from Kafka. * **Batch Materialization Engine:** The [Batch Materialization Engine](batch-materialization-engine.md) component launches a process which loads data into the online store from the offline store. By default, Feast uses a local in-process engine implementation to materialize data. However, additional infrastructure can be used for a more scalable materialization process. * **Online Store:** The online store is a database that stores only the latest feature values for each entity. The online store is either populated through materialization jobs or through [stream ingestion](../../reference/data-sources/push.md). diff --git a/docs/getting-started/concepts/data-ingestion.md b/docs/getting-started/concepts/data-ingestion.md index 3dd3fbbd927..55b54045d21 100644 --- a/docs/getting-started/concepts/data-ingestion.md +++ b/docs/getting-started/concepts/data-ingestion.md @@ -16,7 +16,7 @@ Feast supports primarily **time-stamped** tabular data as data sources. There ar * **Stream data sources**: Feast does **not** have native streaming integrations. It does however facilitate making streaming features available in different environments. There are two kinds of sources: * **Push sources** allow users to push features into Feast, and make it available for training / batch scoring ("offline"), for realtime feature serving ("online") or both. * **\[Alpha] Stream sources** allow users to register metadata from Kafka or Kinesis sources. The onus is on the user to ingest from these sources, though Feast provides some limited helper methods to ingest directly from Kafka / Kinesis topics. -* **(Experimental) Request data sources:** This is data that is only available at request time (e.g. 
from a user action that needs an immediate model prediction response). This is primarily relevant as an input into [**on-demand feature views**](../../../docs/reference/alpha-on-demand-feature-view.md), which allow light-weight feature engineering and combining features across sources. +* **(Experimental) Request data sources:** This is data that is only available at request time (e.g. from a user action that needs an immediate model prediction response). This is primarily relevant as an input into [**on-demand feature views**](../../../docs/reference/beta-on-demand-feature-view.md), which allow light-weight feature engineering and combining features across sources. ## Batch data ingestion diff --git a/docs/getting-started/concepts/dataset.md b/docs/getting-started/concepts/dataset.md index 829ad4284e5..3fabc48a140 100644 --- a/docs/getting-started/concepts/dataset.md +++ b/docs/getting-started/concepts/dataset.md @@ -7,7 +7,7 @@ Dataset's metadata is stored in the Feast registry and raw data (features, entit Dataset can be created from: 1. Results of historical retrieval -2. \[planned] Logging request (including input for [on demand transformation](../../reference/alpha-on-demand-feature-view.md)) and response during feature serving +2. \[planned] Logging request (including input for [on demand transformation](../../reference/beta-on-demand-feature-view.md)) and response during feature serving 3. \[planned] Logging features during writing to online store (from batch source or stream) ### Creating a saved dataset from historical retrieval diff --git a/docs/getting-started/concepts/feature-view.md b/docs/getting-started/concepts/feature-view.md index 6ebe4feacff..faaaf54408a 100644 --- a/docs/getting-started/concepts/feature-view.md +++ b/docs/getting-started/concepts/feature-view.md @@ -6,7 +6,14 @@ **Note**: Feature views do not work with non-timestamped data. A workaround is to insert dummy timestamps. 
{% endhint %} -A feature view is an object that represents a logical group of time-series feature data as it is found in a [data source](data-ingestion.md). Depending on the kind of feature view, it may contain some lightweight (experimental) feature transformations (see [\[Alpha\] On demand feature views](feature-view.md#alpha-on-demand-feature-views)). +A **feature view** is defined as a *collection of features*. + +- In the online setting, this is a *stateful* collection of +features that are read when the `get_online_features` method is called. +- In the offline setting, this is a *stateless* collection of features that are created when the `get_historical_features` +method is called. + +A feature view is an object representing a logical group of time-series feature data as it is found in a [data source](data-ingestion.md). Depending on the kind of feature view, it may contain some lightweight (experimental) feature transformations (see [\[Beta\] On demand feature views](../../reference/beta-on-demand-feature-view.md)). Feature views consist of: diff --git a/docs/getting-started/concepts/permission.md b/docs/getting-started/concepts/permission.md index a6353579687..8db67032878 100644 --- a/docs/getting-started/concepts/permission.md +++ b/docs/getting-started/concepts/permission.md @@ -69,7 +69,7 @@ Permission( name="feature-reader", types=[FeatureView, FeatureService], policy=RoleBasedPolicy(roles=["super-reader"]), - actions=[AuthzedAction.DESCRIBE, READ], + actions=[AuthzedAction.DESCRIBE, *READ], ) ``` diff --git a/docs/getting-started/faq.md b/docs/getting-started/faq.md index 6567ae181da..b790d6dd719 100644 --- a/docs/getting-started/faq.md +++ b/docs/getting-started/faq.md @@ -55,7 +55,7 @@ Yes.
In earlier versions of Feast, we used Feast Spark to manage ingestion from There are several kinds of transformations: -* On demand transformations (See [docs](../reference/alpha-on-demand-feature-view.md)) +* On demand transformations (See [docs](../reference/beta-on-demand-feature-view.md)) * These transformations are Pandas transformations run on batch data when you call `get_historical_features` and at online serving time when you call \`get\_online\_features. * Note that if you use push sources to ingest streaming features, these transformations will execute on the fly as well * Batch transformations (WIP, see [RFC](https://docs.google.com/document/d/1964OkzuBljifDvkV-0fakp2uaijnVzdwWNGdz7Vz50A/edit)) diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index d35446ce7f0..a83897005fd 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -10,6 +10,9 @@ Feast (Feature Store) is an open-source feature store designed to facilitate the * *For Data Engineers*: Feast provides a centralized catalog for storing feature definitions allowing one to maintain a single source of truth for feature data. It provides the abstraction for reading and writing to many different types of offline and online data stores. Using either the provided python SDK or the feature server service, users can write data to the online and/or offline stores and then read that data out again in either low-latency online scenarios for model inference, or in batch scenarios for model training. +* *For AI Engineers*: Feast provides a platform designed to scale your AI applications by enabling seamless integration of richer data and facilitating fine-tuning. With Feast, you can optimize the performance of your AI models while ensuring a scalable and efficient data pipeline. 
+ + For more info refer to [Introduction to feast](../README.md) ## Prerequisites diff --git a/docs/how-to-guides/customizing-feast/adding-a-new-offline-store.md b/docs/how-to-guides/customizing-feast/adding-a-new-offline-store.md index 28592f0cd1a..c8e0258fdf7 100644 --- a/docs/how-to-guides/customizing-feast/adding-a-new-offline-store.md +++ b/docs/how-to-guides/customizing-feast/adding-a-new-offline-store.md @@ -440,11 +440,10 @@ test-python-universal-spark: ### 7. Dependencies -Add any dependencies for your offline store to our `sdk/python/setup.py` under a new `__REQUIRED` list with the packages and add it to the setup script so that if your offline store is needed, users can install the necessary python packages. These packages should be defined as extras so that they are not installed by users by default. You will need to regenerate our requirements files. To do this, create separate pyenv environments for python 3.8, 3.9, and 3.10. In each environment, run the following commands: +Add any dependencies for your offline store to our `sdk/python/setup.py` under a new `__REQUIRED` list with the packages and add it to the setup script so that if your offline store is needed, users can install the necessary python packages. These packages should be defined as extras so that they are not installed by users by default. You will need to regenerate our requirements files: ``` -export PYTHON= -make lock-python-ci-dependencies +make lock-python-ci-dependencies-all ``` ### 8. Add Documentation diff --git a/docs/how-to-guides/customizing-feast/adding-support-for-a-new-online-store.md b/docs/how-to-guides/customizing-feast/adding-support-for-a-new-online-store.md index 5e26f133cef..ee75aa6b74f 100644 --- a/docs/how-to-guides/customizing-feast/adding-support-for-a-new-online-store.md +++ b/docs/how-to-guides/customizing-feast/adding-support-for-a-new-online-store.md @@ -25,7 +25,7 @@ OnlineStore class names must end with the OnlineStore suffix! 
### Contrib online stores -New online stores go in `sdk/python/feast/infra/online_stores/contrib/`. +New online stores go in `sdk/python/feast/infra/online_stores/`. #### What is a contrib plugin? diff --git a/docs/how-to-guides/running-feast-in-production.md b/docs/how-to-guides/running-feast-in-production.md index 021a10ac1ca..aa277c2540a 100644 --- a/docs/how-to-guides/running-feast-in-production.md +++ b/docs/how-to-guides/running-feast-in-production.md @@ -203,7 +203,7 @@ feature_vector = fs.get_online_features( ).to_dict() ``` -### 4.2. Deploy Feast feature servers on Kubernetes +### 4.2. Deploy Feast feature servers on Kubernetes (Deprecated replaced by [feast-operator](../../infra/feast-operator/README.md)) To deploy a Feast feature server on Kubernetes, you can use the included [helm chart + tutorial](https://github.com/feast-dev/feast/tree/master/infra/charts/feast-feature-server) (which also has detailed instructions and an example tutorial). diff --git a/docs/reference/starting-feast-servers-tls-mode.md b/docs/how-to-guides/starting-feast-servers-tls-mode.md similarity index 60% rename from docs/reference/starting-feast-servers-tls-mode.md rename to docs/how-to-guides/starting-feast-servers-tls-mode.md index 366cd79d564..a868e17cf96 100644 --- a/docs/reference/starting-feast-servers-tls-mode.md +++ b/docs/how-to-guides/starting-feast-servers-tls-mode.md @@ -1,7 +1,9 @@ # Starting feast servers in TLS (SSL) mode. TLS (Transport Layer Security) and SSL (Secure Sockets Layer) are both protocols encrypts communications between a client and server to provide enhanced security.TLS or SSL words used interchangeably. This article is going to show the sample code to start all the feast servers such as online server, offline server, registry server and UI server in TLS mode. -Also show examples related to feast clients to communicate with the feast servers started in TLS mode. 
+Also show examples related to feast clients to communicate with the feast servers started in TLS mode. + +We assume you have a basic understanding of Feast terminology before going through this tutorial. If you are new to Feast, we recommend going through the existing [starter tutorials](./../../examples) of Feast. ## Obtaining a self-signed TLS certificate and key In development mode we can generate a self-signed certificate for testing. In an actual production environment it is always recommended to get it from a trusted TLS certificate provider. @@ -17,15 +19,32 @@ The above command will generate two files You can use the public or private keys generated from above command in the rest of the sections in this tutorial. ## Create the feast demo repo for the rest of the sections. -create a feast repo using `feast init` command and use this repo as a demo for subsequent sections. +Create a feast repo and initialize using `feast init` and `feast apply` command and use this repo as a demo for subsequent sections. ```shell feast init feast_repo_ssl_demo -``` -Output is -``` +#output will be something similar as below Creating a new Feast repository in /Documents/Src/feast/feast_repo_ssl_demo.
+ +cd feast_repo_ssl_demo/feature_repo +feast apply + +#output will be something similar as below +Applying changes for project feast_repo_ssl_demo + +Created project feast_repo_ssl_demo +Created entity driver +Created feature view driver_hourly_stats +Created feature view driver_hourly_stats_fresh +Created on demand feature view transformed_conv_rate +Created on demand feature view transformed_conv_rate_fresh +Created feature service driver_activity_v1 +Created feature service driver_activity_v3 +Created feature service driver_activity_v2 + +Created sqlite table feast_repo_ssl_demo_driver_hourly_stats_fresh +Created sqlite table feast_repo_ssl_demo_driver_hourly_stats ``` You need to execute the feast cli commands from `feast_repo_ssl_demo/feature_repo` directory created from the above `feast init` command. @@ -68,7 +87,7 @@ entity_key_serialization_version: 2 auth: type: no_auth ``` -{% endcode %} + `cert` is an optional configuration to the public certificate path when the online server starts in TLS(SSL) mode. Typically, this file ends with `*.crt`, `*.cer`, or `*.pem`. @@ -106,14 +125,55 @@ entity_key_serialization_version: 2 auth: type: no_auth ``` -{% endcode %} `cert` is an optional configuration to the public certificate path when the registry server starts in TLS(SSL) mode. Typically, this file ends with `*.crt`, `*.cer`, or `*.pem`. ## Starting feast offline server in TLS mode -TBD +To start the offline server in TLS mode, you need to provide the private and public keys using the `--key` and `--cert` arguments with the `feast serve_offline` command. +```shell +feast serve_offline --key /path/to/key.pem --cert /path/to/cert.pem +``` +You will see the output something similar to as below. Note the server url starts in the `https` mode. + +```shell +11/07/2024 11:10:01 AM feast.offline_server INFO: Found SSL certificates in the args so going to start offline server in TLS(SSL) mode. 
+11/07/2024 11:10:01 AM feast.offline_server INFO: Offline store server serving at: grpc+tls://127.0.0.1:8815 +11/07/2024 11:10:01 AM feast.offline_server INFO: offline server starting with pid: [11606] +``` + +### Feast client connecting to remote offline server started in TLS mode. + +Sometimes you may need to pass the self-signed public key to connect to the remote registry server started in SSL mode if you have not added the public key to the certificate store. +You have to set `scheme` to `https`. + +feast client example: + +```yaml +project: feast-project +registry: + registry_type: remote + path: https://localhost:6570 + cert: /path/to/cert.pem +provider: local +online_store: + path: http://localhost:6566 + type: remote + cert: /path/to/cert.pem +entity_key_serialization_version: 2 +offline_store: + type: remote + host: localhost + port: 8815 + scheme: https + cert: /path/to/cert.pem +auth: + type: no_auth +``` + +`cert` is an optional configuration to the public certificate path when the registry server starts in TLS(SSL) mode. Typically, this file ends with `*.crt`, `*.cer`, or `*.pem`. +`scheme` should be `https`. By default, it will be `http` so you have to explicitly configure to `https` if you are planning to connect to remote offline server which is started in TLS mode. ## Starting feast UI server (react app) in TLS mode To start the feast UI server in TLS mode, you need to provide the private and public keys using the `--key` and `--cert` arguments with the `feast ui` command. @@ -129,3 +189,8 @@ INFO: Waiting for application startup. INFO: Application startup complete. INFO: Uvicorn running on https://0.0.0.0:8888 (Press CTRL+C to quit) ``` + + +## Adding public key to CA trust store and configuring the feast to use the trust store. +You can pass the public key for SSL verification using the `cert` parameter, however, it is sometimes difficult to maintain individual certificates and pass them individually.
+The alternative recommendation is to add the public certificate to CA trust store and set the path as an environment variable (e.g., `FEAST_CA_CERT_FILE_PATH`). Feast will use the trust store path in the `FEAST_CA_CERT_FILE_PATH` environment variable. \ No newline at end of file diff --git a/docs/project/development-guide.md b/docs/project/development-guide.md index b6137741906..5b2d0a521e8 100644 --- a/docs/project/development-guide.md +++ b/docs/project/development-guide.md @@ -54,8 +54,8 @@ See [Contribution process](./contributing.md) and [Community](../community.md) f ## Making a pull request We use the convention that the assignee of a PR is the person with the next action. -If the assignee is empty it means that no reviewer has been found yet. -If a reviewer has been found, they should also be the assigned the PR. +If the assignee is empty it means that no reviewer has been found yet. +If a reviewer has been found, they should also be assigned the PR. Finally, if there are comments to be addressed, the PR author should be the one assigned the PR. PRs that are submitted by the general public need to be identified as `ok-to-test`. Once enabled, [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) will run a range of tests to verify the submission, after which community members will help to review the pull request. @@ -120,51 +120,39 @@ Note that this means if you are midway through working through a PR and rebase, ## Feast Python SDK and CLI ### Environment Setup -Setting up your development environment for Feast Python SDK and CLI: -1. Ensure that you have Docker installed in your environment. Docker is used to provision service dependencies during testing, and build images for feature servers and other components. +#### Tools +- Docker: Docker is used to provision service dependencies during testing, and build images for feature servers and other components.
- Please note that we use [Docker with BuiltKit](https://docs.docker.com/develop/develop-images/build_enhancements/). - _Alternatively_ - To use [podman](https://podman.io/) on a Fedora or RHEL machine, follow this [guide](https://github.com/feast-dev/feast/issues/4190) -2. Ensure that you have `make` and Python (3.9 or above) installed. -3. _Recommended:_ Create a virtual environment to isolate development dependencies to be installed - ```sh - # create & activate a virtual environment - python -m venv venv/ - source venv/bin/activate - ``` -4. (M1 Mac only): Follow the [dev guide](https://github.com/feast-dev/feast/issues/2105) -5. Install uv. It is recommended to use uv for managing python dependencies. +- `make` is used to run various scripts +- [uv](https://docs.astral.sh/) for managing python dependencies. [installation instructions](https://docs.astral.sh/uv/getting-started/installation/) +- (M1 Mac only): Follow the [dev guide if you have issues](https://github.com/feast-dev/feast/issues/2105) +- (Optional): Node & Yarn (needed for building the feast UI) +- (Optional): [Pixi](https://pixi.sh/latest/) for recompiling python lock files. Only when you make changes to requirements or simply want to update python lock files to reflect latest versions. + +### Quick start +- create a new virtual env: `uv venv --python 3.11` (Replace the python version with your desired version) +- activate the venv: `source venv/bin/activate` +- Install dependencies `make install-python-dependencies-dev` + +### building the UI ```sh -curl -LsSf https://astral.sh/uv/install.sh | sh -``` -or -```ssh -pip install uv -``` -6. (Optional): Install Node & Yarn. Then run the following to build Feast UI artifacts for use in `feast ui` ``` make build-ui ``` -7. (Optional) install pixi. pixi is necessary to run step 8 for all python versions at once. -```sh -curl -fsSL https://pixi.sh/install.sh | bash -``` -8. (Optional): Recompile python lock files.
Only when you make changes to requirements or simply want to update python lock files to reflect latest versioons. -```sh -make lock-python-dependencies-all -``` -9. Install development dependencies for Feast Python SDK and CLI. This will install package versions from the lock file, install editable version of feast and compile protobufs. -If running inside a virtual environment: +### Recompiling python lock files +Recompile python lock files. This only needs to be run when you make changes to requirements or simply want to update python lock files to reflect latest versions. + ```sh -make install-python-ci-dependencies-uv-venv +make lock-python-dependencies-all ``` -Otherwise: +### Building protos ```sh -make install-python-ci-dependencies-uv +make compile-protos-python ``` -10. Spin up Docker Image +### Building a docker image for development ```sh docker build -t docker-whale -f ./sdk/python/feast/infra/feature_servers/multicloud/Dockerfile . ``` @@ -405,7 +393,7 @@ It will: ### Testing with Github Actions workflows -Please refer to the maintainers [doc](maintainers.md) if you would like to locally test out the github actions workflow changes. +Please refer to the maintainers [doc](maintainers.md) if you would like to locally test out the github actions workflow changes. This document will help you setup your fork to test the ci integration tests and other workflows without needing to make a pull request against feast-dev master. ## Feast Data Storage Format @@ -414,4 +402,3 @@ Feast data storage contracts are documented in the following locations: * [Feast Offline Storage Format](https://github.com/feast-dev/feast/blob/master/docs/specs/offline_store_format.md): Used by BigQuery, Snowflake \(Future\), Redshift \(Future\). * [Feast Online Storage Format](https://github.com/feast-dev/feast/blob/master/docs/specs/online_store_format.md): Used by Redis, Google Datastore. 
- diff --git a/docs/project/release-process.md b/docs/project/release-process.md index 251b9338f0a..2cddca508cf 100644 --- a/docs/project/release-process.md +++ b/docs/project/release-process.md @@ -1,5 +1,43 @@ # Release process +The release process is automated through a GitHub Action called [release.yml](https://github.com/feast-dev/feast/blob/master/.github/workflows/release.yml). +Here's a diagram of the workflows: + +```mermaid +graph LR + A[get_dry_release_versions] --> B[validate_version_bumps] + B --> C[publish-web-ui-npm] + C --> D[release] +``` + +The release step will trigger an automated chore commit by the CI-bot ([example](https://github.com/feast-dev/feast/commit/121617053344117cdbfbb480882b10cc176245ac)). + +After the `release` step and release commit, the `publish` step will be triggered ([example](https://github.com/feast-dev/feast/actions/runs/13143995111)). + +The `publish` workflow triggers this flow: + +```mermaid +graph TD + A[publish.yml] -->|triggers| B[publish_python_sdk.yml] + B -->|needs| C[publish_images.yml] + B -->|needs| D[publish_helm_charts.yml] + + subgraph B[publish_python_sdk.yml] + direction LR + B1[Checkout code] --> B2[Set up Python] --> B3[Install dependencies] --> B4[Run tests] --> B5[Build wheels] --> B6[Publish to PyPI] + end + + subgraph C[publish_images.yml] + direction LR + C1[Checkout code] --> C2[Set up Docker] --> C3[Build Docker images] --> C4[Push Docker images] + end + + subgraph D[publish_helm_charts.yml] + direction LR + D1[Checkout code] --> D2[Set up Helm] --> D3[Package Helm charts] --> D4[Publish Helm charts] + end +``` + ## Release process For Feast maintainers, these are the concrete steps for making a new release.
diff --git a/docs/reference/alpha-vector-database.md b/docs/reference/alpha-vector-database.md index ae6b47f0422..861c3fcb114 100644 --- a/docs/reference/alpha-vector-database.md +++ b/docs/reference/alpha-vector-database.md @@ -7,20 +7,35 @@ Vector database allows user to store and retrieve embeddings. Feast provides gen ## Integration Below are supported vector databases and implemented features: -| Vector Database | Retrieval | Indexing | -|-----------------|-----------|----------| -| Pgvector | [x] | [ ] | -| Elasticsearch | [x] | [x] | -| Milvus | [ ] | [ ] | -| Faiss | [ ] | [ ] | -| SQLite | [x] | [ ] | -| Qdrant | [x] | [x] | +| Vector Database | Retrieval | Indexing | V2 Support* | Online Read | +|-----------------|-----------|----------|-------------|-------------| +| Pgvector | [x] | [ ] | [] | [] | +| Elasticsearch | [x] | [x] | [] | [] | +| Milvus | [x] | [x] | [x] | [x] | +| Faiss | [ ] | [ ] | [] | [] | +| SQLite | [x] | [ ] | [x] | [x] | +| Qdrant | [x] | [x] | [] | [] | + +*Note: V2 Support means the SDK supports retrieval of features along with vector embeddings from vector similarity search. Note: SQLite is in limited access and only working on Python 3.10. It will be updated as [sqlite_vec](https://github.com/asg017/sqlite-vec/) progresses. -## Example +{% hint style="danger" %} +We will be deprecating the `retrieve_online_documents` method in the SDK in the future. +We recommend using the `retrieve_online_documents_v2` method instead, which offers easier vector index configuration +directly in the Feature View and the ability to retrieve standard features alongside your vector embeddings for richer context injection. + +Long term we will collapse the two methods into one, but for now, we recommend using the `retrieve_online_documents_v2` method. 
+Beyond that, we will then have `retrieve_online_documents` and `retrieve_online_documents_v2` simply point to `get_online_features` for +backwards compatibility and adopt industry standard naming conventions. +{% endhint %} + +**Note**: Milvus and SQLite implement the v2 `retrieve_online_documents_v2` method in the SDK. This will be the longer-term solution so that Data Scientists can easily enable vector similarity search by just flipping a flag. -See [https://github.com/feast-dev/feast-workshop/blob/rag/module_4_rag](https://github.com/feast-dev/feast-workshop/blob/rag/module_4_rag) for an example on how to use vector database. +## Examples + +- See the v0 [Rag Demo](https://github.com/feast-dev/feast-workshop/blob/rag/module_4_rag) for an example on how to use vector database using the `retrieve_online_documents` method (planning migration and deprecation). +- See the v1 [Milvus Quickstart](../../examples/rag/milvus-quickstart.ipynb) for a quickstart guide on how to use Feast with Milvus using the `retrieve_online_documents_v2` method. ### **Prepare offline embedding dataset** Run the following commands to prepare the embedding dataset: @@ -31,28 +46,26 @@ python batch_score_documents.py The output will be stored in `data/city_wikipedia_summaries.csv.` ### **Initialize Feast feature store and materialize the data to the online store** -Use the feature_store.yaml file to initialize the feature store. This will use the data as offline store, and Pgvector as online store. +Use the feature_store.yaml file to initialize the feature store. This will use the data as offline store, and Milvus as online store.
```yaml -project: feast_demo_local +project: local_rag provider: local -registry: - registry_type: sql - path: postgresql://@localhost:5432/feast +registry: data/registry.db online_store: - type: postgres + type: milvus + path: data/online_store.db vector_enabled: true - vector_len: 384 - host: 127.0.0.1 - port: 5432 - database: feast - user: "" - password: "" + embedding_dim: 384 + index_type: "IVF_FLAT" offline_store: type: file -entity_key_serialization_version: 2 +entity_key_serialization_version: 3 +# By default, no_auth for authentication and authorization, other possible values kubernetes and oidc. Refer the documentation for more details. +auth: + type: no_auth ``` Run the following command in terminal to apply the feature store configuration: @@ -63,75 +76,128 @@ feast apply Note that when you run `feast apply` you are going to apply the following Feature View that we will use for retrieval later: ```python -city_embeddings_feature_view = FeatureView( - name="city_embeddings", - entities=[item], +document_embeddings = FeatureView( + name="embedded_documents", + entities=[item, author], schema=[ - Field(name="Embeddings", dtype=Array(Float32)), + Field( + name="vector", + dtype=Array(Float32), + # Look how easy it is to enable RAG! 
+ vector_index=True, + vector_search_metric="COSINE", + ), + Field(name="item_id", dtype=Int64), + Field(name="author_id", dtype=String), + Field(name="created_timestamp", dtype=UnixTimestamp), + Field(name="sentence_chunks", dtype=String), + Field(name="event_timestamp", dtype=UnixTimestamp), ], - source=source, - ttl=timedelta(hours=2), + source=rag_documents_source, + ttl=timedelta(hours=24), ) ``` -Then run the following command in the terminal to materialize the data to the online store: - -```shell -CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S") -feast materialize-incremental $CURRENT_TIME +Let's use the SDK to write a data frame of embeddings to the online store: +```python +store.write_to_online_store(feature_view_name='city_embeddings', df=df) ``` ### **Prepare a query embedding** +During inference (e.g., during when a user submits a chat message) we need to embed the input text. This can be thought of as a feature transformation of the input data. In this example, we'll do this with a small Sentence Transformer from Hugging Face. + ```python -from batch_score_documents import run_model, TOKENIZER, MODEL +import torch +import torch.nn.functional as F +from feast import FeatureStore +from pymilvus import MilvusClient, DataType, FieldSchema from transformers import AutoTokenizer, AutoModel - -question = "the most populous city in the U.S. state of Texas?" 
+from example_repo import city_embeddings_feature_view, item + +TOKENIZER = "sentence-transformers/all-MiniLM-L6-v2" +MODEL = "sentence-transformers/all-MiniLM-L6-v2" + +def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[ + 0 + ] # First element of model_output contains all token embeddings + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9 + ) + +def run_model(sentences, tokenizer, model): + encoded_input = tokenizer( + sentences, padding=True, truncation=True, return_tensors="pt" + ) + # Compute token embeddings + with torch.no_grad(): + model_output = model(**encoded_input) + + sentence_embeddings = mean_pooling(model_output, encoded_input["attention_mask"]) + sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1) + return sentence_embeddings + +question = "Which city has the largest population in New York?" tokenizer = AutoTokenizer.from_pretrained(TOKENIZER) model = AutoModel.from_pretrained(MODEL) -query_embedding = run_model(question, tokenizer, model) -query = query_embedding.detach().cpu().numpy().tolist()[0] +query_embedding = run_model(question, tokenizer, model).detach().cpu().numpy().tolist()[0] ``` -### **Retrieve the top 5 similar documents** -First create a feature store instance, and use the `retrieve_online_documents` API to retrieve the top 5 similar documents to the specified query. +### **Retrieve the top K similar documents** +First create a feature store instance, and use the `retrieve_online_documents_v2` API to retrieve the top 5 similar documents to the specified query. 
```python -from feast import FeatureStore -store = FeatureStore(repo_path=".") -features = store.retrieve_online_documents( - feature="city_embeddings:Embeddings", - query=query, - top_k=5 -).to_dict() - -def print_online_features(features): - for key, value in sorted(features.items()): - print(key, " : ", value) - -print_online_features(features) +context_data = store.retrieve_online_documents_v2( + features=[ + "city_embeddings:vector", + "city_embeddings:item_id", + "city_embeddings:state", + "city_embeddings:sentence_chunks", + "city_embeddings:wiki_summary", + ], + query=query_embedding, + top_k=3, + distance_metric='COSINE', +).to_df() ``` +### **Generate the Response** +Let's assume we have a base prompt and a function that formats the retrieved documents called `format_documents` that we +can then use to generate the response with OpenAI's chat completion API. +```python +FULL_PROMPT = format_documents(rag_context_data, BASE_PROMPT) -### Configuration +from openai import OpenAI -We offer [PGVector](https://github.com/pgvector/pgvector), [SQLite](https://github.com/asg017/sqlite-vec), [Elasticsearch](https://www.elastic.co) and [Qdrant](https://qdrant.tech/) as Online Store options for Vector Databases. - -#### Installation with SQLite +client = OpenAI( + api_key=os.environ.get("OPENAI_API_KEY"), +) +response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + {"role": "system", "content": FULL_PROMPT}, + {"role": "user", "content": question} + ], +) -If you are using `pyenv` to manage your Python versions, you can install the SQLite extension with the following command: -```bash -PYTHON_CONFIGURE_OPTS="--enable-loadable-sqlite-extensions" \ - LDFLAGS="-L/opt/homebrew/opt/sqlite/lib" \ - CPPFLAGS="-I/opt/homebrew/opt/sqlite/include" \ - pyenv install 3.10.14 +# And this will print the content. Look at the examples/rag/milvus-quickstart.ipynb for an end-to-end example. 
+print('\n'.join([c.message.content for c in response.choices])) ``` -And you can the Feast install package via: + +### Configuration and Installation + +We offer [Milvus](https://milvus.io/), [PGVector](https://github.com/pgvector/pgvector), [SQLite](https://github.com/asg017/sqlite-vec), [Elasticsearch](https://www.elastic.co) and [Qdrant](https://qdrant.tech/) as Online Store options for Vector Databases. + +Milvus offers a convenient local implementation for vector similarity search. To use Milvus, you can install the Feast package with the Milvus extra. + +#### Installation with Milvus ```bash -pip install feast[sqlite_vec] +pip install feast[milvus] ``` - #### Installation with Elasticsearch ```bash @@ -143,3 +209,17 @@ pip install feast[elasticsearch] ```bash pip install feast[qdrant] ``` +#### Installation with SQLite + +If you are using `pyenv` to manage your Python versions, you can install the SQLite extension with the following command: +```bash +PYTHON_CONFIGURE_OPTS="--enable-loadable-sqlite-extensions" \ + LDFLAGS="-L/opt/homebrew/opt/sqlite/lib" \ + CPPFLAGS="-I/opt/homebrew/opt/sqlite/include" \ + pyenv install 3.10.14 +``` + +And you can install the Feast package via: +```bash +pip install feast[sqlite_vec] +``` diff --git a/docs/reference/alpha-web-ui.md b/docs/reference/alpha-web-ui.md index 02dd107f1b4..80c5b824c5a 100644 --- a/docs/reference/alpha-web-ui.md +++ b/docs/reference/alpha-web-ui.md @@ -100,9 +100,9 @@ yarn start The advantage of importing Feast UI as a module is in the ease of customization. The `` component exposes a `feastUIConfigs` prop thorough which you can customize the UI. Currently it supports a few parameters. -**Fetching the Project List** +##### Fetching the Project List -You can use `projectListPromise` to provide a promise that overrides where the Feast UI fetches the project list from. +By default, the Feast UI fetches the project list from the app root path.
You can use `projectListPromise` to provide a promise that overrides where it's fetched from. ```jsx ``` -**Custom Tabs** +##### Custom Tabs You can add custom tabs for any of the core Feast objects through the `tabsRegistry`. -``` +```jsx const tabsRegistry = { RegularFeatureViewCustomTabs: [ { diff --git a/docs/reference/data-sources/README.md b/docs/reference/data-sources/README.md index e69fbab8e36..09df6b861e8 100644 --- a/docs/reference/data-sources/README.md +++ b/docs/reference/data-sources/README.md @@ -34,6 +34,10 @@ Please see [Data Source](../../getting-started/concepts/data-ingestion.md) for a [kinesis.md](kinesis.md) {% endcontent-ref %} +{% content-ref url="couchbase.md" %} +[couchbase.md](couchbase.md) +{% endcontent-ref %} + {% content-ref url="spark.md" %} [spark.md](spark.md) {% endcontent-ref %} diff --git a/docs/reference/data-sources/couchbase.md b/docs/reference/data-sources/couchbase.md new file mode 100644 index 00000000000..596e33cf50d --- /dev/null +++ b/docs/reference/data-sources/couchbase.md @@ -0,0 +1,37 @@ +# Couchbase Columnar source (contrib) + +## Description + +Couchbase Columnar data sources are [Couchbase Capella Columnar](https://docs.couchbase.com/columnar/intro/intro.html) collections that can be used as a source for feature data. **Note that Couchbase Columnar is available through [Couchbase Capella](https://cloud.couchbase.com/).** + +## Disclaimer + +The Couchbase Columnar data source does not achieve full test coverage. +Please do not assume complete stability. 
+ +## Examples + +Defining a Couchbase Columnar source: + +```python +from feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source import ( + CouchbaseColumnarSource, +) + +driver_stats_source = CouchbaseColumnarSource( + name="driver_hourly_stats_source", + query="SELECT * FROM Default.Default.`feast_driver_hourly_stats`", + database="Default", + scope="Default", + collection="feast_driver_hourly_stats", + timestamp_field="event_timestamp", + created_timestamp_column="created", +) +``` + +The full set of configuration options is available [here](https://rtd.feast.dev/en/master/#feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source.CouchbaseColumnarSource). + +## Supported Types + +Couchbase Capella Columnar data sources support `BOOLEAN`, `STRING`, `BIGINT`, and `DOUBLE` primitive types. +For a comparison against other batch data sources, please see [here](overview.md#functionality-matrix). diff --git a/docs/reference/data-sources/overview.md b/docs/reference/data-sources/overview.md index 5c2fdce9fd1..9880d388dde 100644 --- a/docs/reference/data-sources/overview.md +++ b/docs/reference/data-sources/overview.md @@ -18,14 +18,14 @@ Details for each specific data source can be found [here](README.md). Below is a matrix indicating which data sources support which types. 
-| | File | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | -| :-------------------------------- | :-- | :-- |:----------| :-- | :-- | :-- | :-- | -| `bytes` | yes | yes | yes | yes | yes | yes | yes | -| `string` | yes | yes | yes | yes | yes | yes | yes | -| `int32` | yes | yes | yes | yes | yes | yes | yes | -| `int64` | yes | yes | yes | yes | yes | yes | yes | -| `float32` | yes | yes | yes | yes | yes | yes | yes | -| `float64` | yes | yes | yes | yes | yes | yes | yes | -| `bool` | yes | yes | yes | yes | yes | yes | yes | -| `timestamp` | yes | yes | yes | yes | yes | yes | yes | -| array types | yes | yes | yes | no | yes | yes | no | \ No newline at end of file +| | File | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | Couchbase | +| :-------------------------------- | :-- | :-- |:----------| :-- | :-- | :-- | :-- |:----------| +| `bytes` | yes | yes | yes | yes | yes | yes | yes | yes | +| `string` | yes | yes | yes | yes | yes | yes | yes | yes | +| `int32` | yes | yes | yes | yes | yes | yes | yes | yes | +| `int64` | yes | yes | yes | yes | yes | yes | yes | yes | +| `float32` | yes | yes | yes | yes | yes | yes | yes | yes | +| `float64` | yes | yes | yes | yes | yes | yes | yes | yes | +| `bool` | yes | yes | yes | yes | yes | yes | yes | yes | +| `timestamp` | yes | yes | yes | yes | yes | yes | yes | yes | +| array types | yes | yes | yes | no | yes | yes | no | no | diff --git a/docs/reference/denormalized.md b/docs/reference/denormalized.md index 281e97de553..9ac39947f05 100644 --- a/docs/reference/denormalized.md +++ b/docs/reference/denormalized.md @@ -6,8 +6,8 @@ Denormalized makes it easy to compute real-time features and write them directly ## Prerequisites -- Python 3.8+ -- Kafka cluster (local or remote) +- Python 3.12+ +- Kafka cluster (local or remote) OR docker installed For a full working demo, check out the [feast-example](https://github.com/probably-nothing-labs/feast-example) repo. 
@@ -39,6 +39,13 @@ my-feature-project/ └── main.py # Pipeline runner ``` +3. Run a test Kafka instance in docker + +`docker run --rm -p 9092:9092 emgeee/kafka_emit_measurements:latest` + +This will spin up a docker container that runs a kafka instance and run a simple script to emit fake data to two topics. + + ## Define Your Features In `feature_repo/sensor_data.py`, define your feature view and entity: @@ -85,7 +92,7 @@ sample_event = { } # Create a stream from your Kafka topic -ds = FeastDataStream(Context().from_topic("temperature", json.dumps(sample_event), "localhost:9092")) +ds = FeastDataStream(Context().from_topic("temperature", json.dumps(sample_event), "localhost:9092", "occurred_at_ms")) # Define your feature computations ds = ds.window( @@ -106,7 +113,9 @@ feature_store = FeatureStore(repo_path="feature_repo/") ds.write_feast_feature(feature_store, "push_sensor_statistics") ``` + + ## Need Help? - Email us at hello@denormalized.io -- Check out more examples on our [GitHub](https://github.com/probably-nothing-labs/denormalized) +- Check out more examples on our [GitHub](https://github.com/probably-nothing-labs/denormalized/tree/main/py-denormalized/python/examples) diff --git a/docs/reference/feast-cli-commands.md b/docs/reference/feast-cli-commands.md index 8f1a7c302e6..712df18a6b6 100644 --- a/docs/reference/feast-cli-commands.md +++ b/docs/reference/feast-cli-commands.md @@ -19,6 +19,7 @@ Options: Commands: apply Create or update a feature store deployment + configuration Display Feast configuration entities Access entities feature-views Access feature views init Create a new Feast repository @@ -61,6 +62,28 @@ feast apply `feast apply` \(when configured to use cloud provider like `gcp` or `aws`\) will create cloud infrastructure. This may incur costs. {% endhint %} +## Configuration + +Display the actual configuration being used by Feast, including both user-provided configurations and default configurations applied by Feast. 
+ +```bash +feast configuration +``` + +```yaml +project: foo +registry: data/registry.db +provider: local +online_store: + type: sqlite + path: data/online_store.db +offline_store: + type: dask +entity_key_serialization_version: 2 +auth: + type: no_auth +``` + ## Entities List all registered entities diff --git a/docs/reference/feature-servers/README.md b/docs/reference/feature-servers/README.md index 2ceaf5807f3..156e60c7431 100644 --- a/docs/reference/feature-servers/README.md +++ b/docs/reference/feature-servers/README.md @@ -1,4 +1,4 @@ -# Feature servers +# Feast servers Feast users can choose to retrieve features from a feature server, as opposed to through the Python SDK. @@ -12,4 +12,8 @@ Feast users can choose to retrieve features from a feature server, as opposed to {% content-ref url="offline-feature-server.md" %} [offline-feature-server.md](offline-feature-server.md) +{% endcontent-ref %} + +{% content-ref url="registry-server.md" %} +[registry-server.md](registry-server.md) {% endcontent-ref %} \ No newline at end of file diff --git a/docs/reference/feature-servers/go-feature-server.md b/docs/reference/feature-servers/go-feature-server.md deleted file mode 100644 index 8209799086a..00000000000 --- a/docs/reference/feature-servers/go-feature-server.md +++ /dev/null @@ -1,93 +0,0 @@ -# Go feature server - -## Overview - -The Go feature server is an HTTP/gRPC endpoint that serves features. -It is written in Go, and is therefore significantly faster than the Python feature server. -See this [blog post](https://feast.dev/blog/go-feature-server-benchmarks/) for more details on the comparison between Python and Go. -In general, we recommend the Go feature server for all production use cases that require extremely low-latency feature serving. -Currently only the Redis and SQLite online stores are supported. - -## CLI - -By default, the Go feature server is turned off. 
-To turn it on you can add `go_feature_serving: True` to your `feature_store.yaml`: - -{% code title="feature_store.yaml" %} -```yaml -project: my_feature_repo -registry: data/registry.db -provider: local -online_store: - type: redis - connection_string: "localhost:6379" -go_feature_serving: True -``` -{% endcode %} - -Then the `feast serve` CLI command will start the Go feature server. -As with Python, the Go feature server uses port 6566 by default; the port be overridden with a `--port` flag. -Moreover, the server uses HTTP by default, but can be set to use gRPC with `--type=grpc`. - -Alternatively, if you wish to experiment with the Go feature server instead of permanently turning it on, you can just run `feast serve --go`. - -## Installation - -The Go component comes pre-compiled when you install Feast with Python versions 3.8-3.10 on macOS or Linux (on x86). -In order to install the additional Python dependencies, you should install Feast with -``` -pip install feast[go] -``` -You must also install the Apache Arrow C++ libraries. -This is because the Go feature server uses the cgo memory allocator from the Apache Arrow C++ library for interoperability between Go and Python, to prevent memory from being accidentally garbage collected when executing on-demand feature views. -You can read more about the usage of the cgo memory allocator in these [docs](https://pkg.go.dev/github.com/apache/arrow/go/arrow@v0.0.0-20211112161151-bc219186db40/cdata#ExportArrowRecordBatch). - -For macOS, run `brew install apache-arrow`. -For linux users, you have to install `libarrow-dev`. 
-``` -sudo apt update -sudo apt install -y -V ca-certificates lsb-release wget -wget https://apache.jfrog.io/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -sudo apt install -y -V ./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -sudo apt update -sudo apt install -y -V libarrow-dev # For C++ -``` -For developers, if you want to build from source, run `make compile-go-lib` to build and compile the go server. In order to build the go binaries, you will need to install the `apache-arrow` c++ libraries. - -## Alpha features - -### Feature logging - -The Go feature server can log all requested entities and served features to a configured destination inside an offline store. -This allows users to create new datasets from features served online. Those datasets could be used for future trainings or for -feature validations. To enable feature logging we need to edit `feature_store.yaml`: -```yaml -project: my_feature_repo -registry: data/registry.db -provider: local -online_store: - type: redis - connection_string: "localhost:6379" -go_feature_serving: True -feature_server: - feature_logging: - enabled: True -``` - -Feature logging configuration in `feature_store.yaml` also allows to tweak some low-level parameters to achieve the best performance: -```yaml -feature_server: - feature_logging: - enabled: True - flush_interval_secs: 300 - write_to_disk_interval_secs: 30 - emit_timeout_micro_secs: 10000 - queue_capacity: 10000 -``` -All these parameters are optional. - -### Python SDK retrieval - -The logic for the Go feature server can also be used to retrieve features during a Python `get_online_features` call. -To enable this behavior, you must add `go_feature_retrieval: True` to your `feature_store.yaml`. -You must also have all the dependencies installed as detailed above. 
diff --git a/docs/reference/feature-servers/python-feature-server.md b/docs/reference/feature-servers/python-feature-server.md index d7374852495..348bfdcd3d8 100644 --- a/docs/reference/feature-servers/python-feature-server.md +++ b/docs/reference/feature-servers/python-feature-server.md @@ -226,13 +226,14 @@ feast serve --key /path/to/key.pem --cert /path/to/cert.pem ## API Endpoints and Permissions -| Endpoint | Resource Type | Permission | Description | -| ---------------------------- |---------------------------------|-------------------------------------------------------| ------------------------------------------------------------------------ | -| /get-online-features | FeatureView,OnDemandFeatureView | Read Online | Get online features from the feature store | -| /push | FeatureView | Write Online, Write Offline, Write Online and Offline | Push features to the feature store (online, offline, or both) | -| /write-to-online-store | FeatureView | Write Online | Write features to the online store | -| /materialize | FeatureView | Write Online | Materialize features within a specified time range | -| /materialize-incremental | FeatureView | Write Online | Incrementally materialize features up to a specified timestamp | +| Endpoint | Resource Type | Permission | Description | +|----------------------------|---------------------------------|-------------------------------------------------------|----------------------------------------------------------------| +| /get-online-features | FeatureView,OnDemandFeatureView | Read Online | Get online features from the feature store | +| /retrieve-online-documents | FeatureView | Read Online | Retrieve online documents from the feature store for RAG | +| /push | FeatureView | Write Online, Write Offline, Write Online and Offline | Push features to the feature store (online, offline, or both) | +| /write-to-online-store | FeatureView | Write Online | Write features to the online store | +| /materialize | FeatureView | 
Write Online | Materialize features within a specified time range | +| /materialize-incremental | FeatureView | Write Online | Incrementally materialize features up to a specified timestamp | ## How to configure Authentication and Authorization ? diff --git a/docs/reference/feature-servers/registry-server.md b/docs/reference/feature-servers/registry-server.md new file mode 100644 index 00000000000..9707a597035 --- /dev/null +++ b/docs/reference/feature-servers/registry-server.md @@ -0,0 +1,26 @@ +# Registry server + +## Description + +The Registry server uses the gRPC communication protocol to exchange data. +This enables users to communicate with the server using any programming language that can make gRPC requests. + +## How to configure the server + +## CLI + +There is a CLI command that starts the Registry server: `feast serve_registry`. By default, remote Registry Server uses port 6570, the port can be overridden with a `--port` flag. +To start the Registry Server in TLS mode, you need to provide the private and public keys using the `--key` and `--cert` arguments. +More info about TLS mode can be found in [feast-client-connecting-to-remote-registry-sever-started-in-tls-mode](../../how-to-guides/starting-feast-servers-tls-mode.md#starting-feast-registry-server-in-tls-mode) + +## How to configure the client + +Please see the detail how to configure Remote Registry client [remote.md](../registries/remote.md) + +# Registry Server Permissions and Access Control + +Please refer the [page](./../registry/registry-permissions.md) for more details on API Endpoints and Permissions. + +## How to configure Authentication and Authorization ? + +Please refer the [page](./../../../docs/getting-started/concepts/permission.md) for more details on how to configure authentication and authorization. 
\ No newline at end of file diff --git a/docs/reference/offline-stores/README.md b/docs/reference/offline-stores/README.md index 2b62c4e1f11..ab25fe9a276 100644 --- a/docs/reference/offline-stores/README.md +++ b/docs/reference/offline-stores/README.md @@ -26,6 +26,10 @@ Please see [Offline Store](../../getting-started/components/offline-store.md) fo [duckdb.md](duckdb.md) {% endcontent-ref %} +{% content-ref url="couchbase.md" %} +[couchbase.md](couchbase.md) +{% endcontent-ref %} + {% content-ref url="spark.md" %} [spark.md](spark.md) {% endcontent-ref %} diff --git a/docs/reference/offline-stores/couchbase.md b/docs/reference/offline-stores/couchbase.md new file mode 100644 index 00000000000..3ae0f68d4c2 --- /dev/null +++ b/docs/reference/offline-stores/couchbase.md @@ -0,0 +1,79 @@ +# Couchbase Columnar offline store (contrib) + +## Description + +The Couchbase Columnar offline store provides support for reading [CouchbaseColumnarSources](../data-sources/couchbase.md). **Note that Couchbase Columnar is available through [Couchbase Capella](https://cloud.couchbase.com/).** +* Entity dataframes can be provided as a SQL++ query or can be provided as a Pandas dataframe. A Pandas dataframe will be uploaded to Couchbase Capella Columnar as a collection. + +## Disclaimer + +The Couchbase Columnar offline store does not achieve full test coverage. +Please do not assume complete stability. + +## Getting started + +In order to use this offline store, you'll need to run `pip install 'feast[couchbase]'`. You can get started by then running `feast init -t couchbase`. + +To get started with Couchbase Capella Columnar: +1. Sign up for a [Couchbase Capella](https://cloud.couchbase.com/) account +2. [Deploy a Columnar cluster](https://docs.couchbase.com/columnar/admin/prepare-project.html) +3. [Create an Access Control Account](https://docs.couchbase.com/columnar/admin/auth/auth-data.html) + - This account should be able to read and write. 
+ - For testing purposes, it is recommended to assign all roles to avoid any permission issues. +4. [Configure allowed IP addresses](https://docs.couchbase.com/columnar/admin/ip-allowed-list.html) + - You must allow the IP address of the machine running Feast. + + +## Example + +{% code title="feature_store.yaml" %} +```yaml +project: my_project +registry: data/registry.db +provider: local +offline_store: + type: couchbase.offline + connection_string: COUCHBASE_COLUMNAR_CONNECTION_STRING # Copied from Settings > Connection String page in Capella Columnar console, starts with couchbases:// + user: COUCHBASE_COLUMNAR_USER # Couchbase cluster access name from Settings > Access Control page in Capella Columnar console + password: COUCHBASE_COLUMNAR_PASSWORD # Couchbase password from Settings > Access Control page in Capella Columnar console + timeout: 120 # Timeout in seconds for Columnar operations, optional +online_store: + path: data/online_store.db +``` +{% endcode %} + +Note that `timeout`is an optional parameter. +The full set of configuration options is available in [CouchbaseColumnarOfflineStoreConfig](https://rtd.feast.dev/en/master/#feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase.CouchbaseColumnarOfflineStoreConfig). + + +## Functionality Matrix + +The set of functionality supported by offline stores is described in detail [here](overview.md#functionality). +Below is a matrix indicating which functionality is supported by the Couchbase Columnar offline store. 
+ +| | Couchbase Columnar | +| :----------------------------------------------------------------- |:-------------------| +| `get_historical_features` (point-in-time correct join) | yes | +| `pull_latest_from_table_or_query` (retrieve latest feature values) | yes | +| `pull_all_from_table_or_query` (retrieve a saved dataset) | yes | +| `offline_write_batch` (persist dataframes to offline store) | no | +| `write_logged_features` (persist logged features to offline store) | no | + +Below is a matrix indicating which functionality is supported by `CouchbaseColumnarRetrievalJob`. + +| | Couchbase Columnar | +| ----------------------------------------------------- |--------------------| +| export to dataframe | yes | +| export to arrow table | yes | +| export to arrow batches | no | +| export to SQL | yes | +| export to data lake (S3, GCS, etc.) | yes | +| export to data warehouse | yes | +| export as Spark dataframe | no | +| local execution of Python-based on-demand transforms | yes | +| remote execution of Python-based on-demand transforms | no | +| persist results in the offline store | yes | +| preview the query plan before execution | yes | +| read partitioned data | yes | + +To compare this set of functionality against other offline stores, please see the full [functionality matrix](overview.md#functionality-matrix). diff --git a/docs/reference/offline-stores/overview.md b/docs/reference/offline-stores/overview.md index 182eac65864..191ccd21a64 100644 --- a/docs/reference/offline-stores/overview.md +++ b/docs/reference/offline-stores/overview.md @@ -31,28 +31,28 @@ Details for each specific offline store, such as how to configure it in a `featu Below is a matrix indicating which offline stores support which methods. 
-| | Dask | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | -| :-------------------------------- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | -| `get_historical_features` | yes | yes | yes | yes | yes | yes | yes | -| `pull_latest_from_table_or_query` | yes | yes | yes | yes | yes | yes | yes | -| `pull_all_from_table_or_query` | yes | yes | yes | yes | yes | yes | yes | -| `offline_write_batch` | yes | yes | yes | yes | no | no | no | -| `write_logged_features` | yes | yes | yes | yes | no | no | no | +| | Dask | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | Couchbase | +| :-------------------------------- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | +| `get_historical_features` | yes | yes | yes | yes | yes | yes | yes | yes | +| `pull_latest_from_table_or_query` | yes | yes | yes | yes | yes | yes | yes | yes | +| `pull_all_from_table_or_query` | yes | yes | yes | yes | yes | yes | yes | yes | +| `offline_write_batch` | yes | yes | yes | yes | no | no | no | no | +| `write_logged_features` | yes | yes | yes | yes | no | no | no | no | Below is a matrix indicating which `RetrievalJob`s support what functionality. -| | Dask | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | DuckDB | -| --------------------------------- | --- | --- | --- | --- | --- | --- | --- | --- | -| export to dataframe | yes | yes | yes | yes | yes | yes | yes | yes | -| export to arrow table | yes | yes | yes | yes | yes | yes | yes | yes | -| export to arrow batches | no | no | no | yes | no | no | no | no | -| export to SQL | no | yes | yes | yes | yes | no | yes | no | -| export to data lake (S3, GCS, etc.) 
| no | no | yes | no | yes | no | no | no | -| export to data warehouse | no | yes | yes | yes | yes | no | no | no | -| export as Spark dataframe | no | no | yes | no | no | yes | no | no | -| local execution of Python-based on-demand transforms | yes | yes | yes | yes | yes | no | yes | yes | -| remote execution of Python-based on-demand transforms | no | no | no | no | no | no | no | no | -| persist results in the offline store | yes | yes | yes | yes | yes | yes | no | yes | -| preview the query plan before execution | yes | yes | yes | yes | yes | yes | yes | no | -| read partitioned data | yes | yes | yes | yes | yes | yes | yes | yes | +| | Dask | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | DuckDB | Couchbase | +| --------------------------------- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| export to dataframe | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| export to arrow table | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| export to arrow batches | no | no | no | yes | no | no | no | no | no | +| export to SQL | no | yes | yes | yes | yes | no | yes | no | yes | +| export to data lake (S3, GCS, etc.) 
| no | no | yes | no | yes | no | no | no | yes | +| export to data warehouse | no | yes | yes | yes | yes | no | no | no | yes | +| export as Spark dataframe | no | no | yes | no | no | yes | no | no | no | +| local execution of Python-based on-demand transforms | yes | yes | yes | yes | yes | no | yes | yes | yes | +| remote execution of Python-based on-demand transforms | no | no | no | no | no | no | no | no | no | +| persist results in the offline store | yes | yes | yes | yes | yes | yes | no | yes | yes | +| preview the query plan before execution | yes | yes | yes | yes | yes | yes | yes | no | yes | +| read partitioned data | yes | yes | yes | yes | yes | yes | yes | yes | yes | diff --git a/docs/reference/online-stores/couchbase.md b/docs/reference/online-stores/couchbase.md index ff8822d85d9..2878deb97ee 100644 --- a/docs/reference/online-stores/couchbase.md +++ b/docs/reference/online-stores/couchbase.md @@ -38,7 +38,7 @@ project: my_feature_repo registry: data/registry.db provider: local online_store: - type: couchbase + type: couchbase.online connection_string: couchbase://127.0.0.1 # Couchbase connection string, copied from 'Connect' page in Couchbase Capella console user: Administrator # Couchbase username from access credentials password: password # Couchbase password from access credentials diff --git a/docs/reference/online-stores/milvus.md b/docs/reference/online-stores/milvus.md new file mode 100644 index 00000000000..014c7bd68a5 --- /dev/null +++ b/docs/reference/online-stores/milvus.md @@ -0,0 +1,65 @@ +# Redis online store + +## Description + +The [Milvus](https://milvus.io/) online store provides support for materializing feature values into Milvus. + +* The data model used to store feature values in Milvus is described in more detail [here](../../specs/online\_store\_format.md). 
+ +## Getting started +In order to use this online store, you'll need to install the Milvus extra (along with the dependency needed for the offline store of choice). E.g. + +`pip install 'feast[milvus]'` + +You can get started by using any of the other templates (e.g. `feast init -t gcp` or `feast init -t snowflake` or `feast init -t aws`), and then swapping in Redis as the online store as seen below in the examples. + +## Examples + +Connecting to a local MilvusDB instance: + +{% code title="feature_store.yaml" %} +```yaml +project: my_feature_repo +registry: data/registry.db +provider: local +online_store: + type: milvus + path: "data/online_store.db" + connection_string: "localhost:6379" + embedding_dim: 128 + index_type: "FLAT" + metric_type: "COSINE" + username: "username" + password: "password" +``` +{% endcode %} + + +The full set of configuration options is available in [MilvusOnlineStoreConfig](https://rtd.feast.dev/en/latest/#feast.infra.online_stores.milvus.MilvusOnlineStoreConfig). + +## Functionality Matrix + +The set of functionality supported by online stores is described in detail [here](overview.md#functionality). +Below is a matrix indicating which functionality is supported by the Milvus online store. + +| | Milvus | +|:----------------------------------------------------------|:-------| +| write feature values to the online store | yes | +| read feature values from the online store | yes | +| update infrastructure (e.g. tables) in the online store | yes | +| teardown infrastructure (e.g. 
tables) in the online store | yes | +| generate a plan of infrastructure changes | no | +| support for on-demand transforms | yes | +| readable by Python SDK | yes | +| readable by Java | no | +| readable by Go | no | +| support for entityless feature views | yes | +| support for concurrent writing to the same key | yes | +| support for ttl (time to live) at retrieval | yes | +| support for deleting expired data | yes | +| collocated by feature view | no | +| collocated by feature service | no | +| collocated by entity key | no | +| vector similarity search | yes | + +To compare this set of functionality against other online stores, please see the full [functionality matrix](overview.md#functionality-matrix). diff --git a/docs/reference/online-stores/overview.md b/docs/reference/online-stores/overview.md index 04d24447058..b54329ad613 100644 --- a/docs/reference/online-stores/overview.md +++ b/docs/reference/online-stores/overview.md @@ -34,21 +34,21 @@ Details for each specific online store, such as how to configure it in a `featur Below is a matrix indicating which online stores support what functionality. -| | Sqlite | Redis | DynamoDB | Snowflake | Datastore | Postgres | Hbase | [[Cassandra](https://cassandra.apache.org/_/index.html) / [Astra DB](https://www.datastax.com/products/datastax-astra?utm_source=feast)] | [IKV](https://inlined.io) | -| :-------------------------------------------------------- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | -| write feature values to the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| read feature values from the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| update infrastructure (e.g. tables) in the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| teardown infrastructure (e.g. 
tables) in the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| generate a plan of infrastructure changes | yes | no | no | no | no | no | no | yes | no | -| support for on-demand transforms | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| readable by Python SDK | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| readable by Java | no | yes | no | no | no | no | no | no | no | -| readable by Go | yes | yes | no | no | no | no | no | no | no | -| support for entityless feature views | yes | yes | yes | yes | yes | yes | yes | yes | yes | -| support for concurrent writing to the same key | no | yes | no | no | no | no | no | no | yes | -| support for ttl (time to live) at retrieval | no | yes | no | no | no | no | no | no | no | -| support for deleting expired data | no | yes | no | no | no | no | no | no | no | -| collocated by feature view | yes | no | yes | yes | yes | yes | yes | yes | no | -| collocated by feature service | no | no | no | no | no | no | no | no | no | -| collocated by entity key | no | yes | no | no | no | no | no | no | yes | +| | Sqlite | Redis | DynamoDB | Snowflake | Datastore | Postgres | Hbase | [[Cassandra](https://cassandra.apache.org/_/index.html) / [Astra DB](https://www.datastax.com/products/datastax-astra?utm_source=feast)] | [IKV](https://inlined.io) | Milvus | +| :-------------------------------------------------------- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- |:-------| +| write feature values to the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| read feature values from the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| update infrastructure (e.g. tables) in the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| teardown infrastructure (e.g. 
tables) in the online store | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| generate a plan of infrastructure changes | yes | no | no | no | no | no | no | yes | no | no | +| support for on-demand transforms | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| readable by Python SDK | yes | yes | yes | yes | yes | yes | yes | yes | yes | yes | +| readable by Java | no | yes | no | no | no | no | no | no | no | no | +| readable by Go | yes | yes | no | no | no | no | no | no | no | no | +| support for entityless feature views | yes | yes | yes | yes | yes | yes | yes | yes | yes | no | +| support for concurrent writing to the same key | no | yes | no | no | no | no | no | no | yes | no | +| support for ttl (time to live) at retrieval | no | yes | no | no | no | no | no | no | no | no | +| support for deleting expired data | no | yes | no | no | no | no | no | no | no | no | +| collocated by feature view | yes | no | yes | yes | yes | yes | yes | yes | no | no | +| collocated by feature service | no | no | no | no | no | no | no | no | no | no | +| collocated by entity key | no | yes | no | no | no | no | no | no | yes | no | diff --git a/docs/reference/registries/remote.md b/docs/reference/registries/remote.md new file mode 100644 index 00000000000..3651aeb71ea --- /dev/null +++ b/docs/reference/registries/remote.md @@ -0,0 +1,28 @@ +# Remote Registry + +## Description + +The Remote Registry is a gRPC client for the registry that implements the `RemoteRegistry` class using the existing `BaseRegistry` interface. + +## How to configure the client + +User needs to create a client side `feature_store.yaml` file, set the `registry_type` to `remote` and provide the server connection configuration. +The `path` parameter is a URL with a port (default is 6570) used by the client to connect with the Remote Registry server. 
+ +{% code title="feature_store.yaml" %} +```yaml +registry: + registry_type: remote + path: http://localhost:6570 +``` +{% endcode %} + +The optional `cert` parameter can be configured as well; it should point to the public certificate path when the Registry Server starts in SSL mode. This may be needed if the Registry Server is started with a self-signed certificate; typically this file ends with *.crt, *.cer, or *.pem. +More info about the `cert` parameter can be found in [feast-client-connecting-to-remote-registry-sever-started-in-tls-mode](../../how-to-guides/starting-feast-servers-tls-mode.md#feast-client-connecting-to-remote-registry-sever-started-in-tls-mode) + +## How to configure the server + +Please see the details on how to configure the registry server in [registry-server.md](../feature-servers/registry-server.md) + +## How to configure Authentication and Authorization +Please refer to the [page](./../../../docs/getting-started/concepts/permission.md) for more details on how to configure authentication and authorization. diff --git a/docs/roadmap.md b/docs/roadmap.md index ff6549a3cb1..cb55873c3fa 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -4,6 +4,9 @@ The list below contains the functionality that contributors are planning to deve * We welcome contribution to all items in the roadmap! +* **Natural Language Processing** + * [x] Vector Search (Alpha release. 
See [RFC](https://docs.google.com/document/d/18IWzLEA9i2lDWnbfbwXnMCg3StlqaLVI-uRpQjr_Vos/edit#heading=h.9gaqqtox9jg6)) + * [ ] [Enhanced Feature Server and SDK for native support for NLP](https://github.com/feast-dev/feast/issues/4964) * **Data Sources** * [x] [Snowflake source](https://docs.feast.dev/reference/data-sources/snowflake) * [x] [Redshift source](https://docs.feast.dev/reference/data-sources/redshift) @@ -13,6 +16,7 @@ The list below contains the functionality that contributors are planning to deve * [x] [Hive (community plugin)](https://github.com/baineng/feast-hive) * [x] [Postgres (contrib plugin)](https://docs.feast.dev/reference/data-sources/postgres) * [x] [Spark (contrib plugin)](https://docs.feast.dev/reference/data-sources/spark) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/data-sources/couchbase) * [x] Kafka / Kinesis sources (via [push support into the online store](https://docs.feast.dev/reference/data-sources/push)) * **Offline Stores** * [x] [Snowflake](https://docs.feast.dev/reference/offline-stores/snowflake) @@ -23,6 +27,7 @@ The list below contains the functionality that contributors are planning to deve * [x] [Postgres (contrib plugin)](https://docs.feast.dev/reference/offline-stores/postgres) * [x] [Trino (contrib plugin)](https://github.com/Shopify/feast-trino) * [x] [Spark (contrib plugin)](https://docs.feast.dev/reference/offline-stores/spark) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/offline-stores/couchbase) * [x] [In-memory / Pandas](https://docs.feast.dev/reference/offline-stores/file) * [x] [Custom offline store support](https://docs.feast.dev/how-to-guides/customizing-feast/adding-a-new-offline-store) * **Online Stores** @@ -37,12 +42,14 @@ The list below contains the functionality that contributors are planning to deve * [x] [Azure Cache for Redis (community plugin)](https://github.com/Azure/feast-azure) * [x] [Postgres (contrib 
plugin)](https://docs.feast.dev/reference/online-stores/postgres) * [x] [Cassandra / AstraDB (contrib plugin)](https://docs.feast.dev/reference/online-stores/cassandra) + * [x] [ScyllaDB (contrib plugin)](https://docs.feast.dev/reference/online-stores/scylladb) + * [x] [Couchbase (contrib plugin)](https://docs.feast.dev/reference/online-stores/couchbase) * [x] [Custom online store support](https://docs.feast.dev/how-to-guides/customizing-feast/adding-support-for-a-new-online-store) * **Feature Engineering** - * [x] On-demand Transformations (Beta release. See [RFC](https://docs.google.com/document/d/1lgfIw0Drc65LpaxbUu49RCeJgMew547meSJttnUqz7c/edit#)) + * [x] On-demand Transformations (On Read) (Beta release. See [RFC](https://docs.google.com/document/d/1lgfIw0Drc65LpaxbUu49RCeJgMew547meSJttnUqz7c/edit#)) * [x] Streaming Transformations (Alpha release. See [RFC](https://docs.google.com/document/d/1UzEyETHUaGpn0ap4G82DHluiCj7zEbrQLkJJkKSv4e8/edit)) * [ ] Batch transformation (In progress. See [RFC](https://docs.google.com/document/d/1964OkzuBljifDvkV-0fakp2uaijnVzdwWNGdz7Vz50A/edit)) - * [ ] Persistent On-demand Transformations (Beta release. See [GitHub Issue](https://github.com/feast-dev/feast/issues/4376)) + * [x] On-demand Transformations (On Write) (Beta release. See [GitHub Issue](https://github.com/feast-dev/feast/issues/4376)) * **Streaming** * [x] [Custom streaming ingestion job support](https://docs.feast.dev/how-to-guides/customizing-feast/creating-a-custom-provider) * [x] [Push based streaming data ingestion to online store](https://docs.feast.dev/reference/data-sources/push) @@ -65,5 +72,3 @@ The list below contains the functionality that contributors are planning to deve * [x] DataHub integration (see [DataHub Feast docs](https://datahubproject.io/docs/generated/ingestion/sources/feast/)) * [x] Feast Web UI (Beta release. 
See [docs](https://docs.feast.dev/reference/alpha-web-ui)) * [ ] Feast Lineage Explorer -* **Natural Language Processing** - * [x] Vector Search (Alpha release. See [RFC](https://docs.google.com/document/d/18IWzLEA9i2lDWnbfbwXnMCg3StlqaLVI-uRpQjr_Vos/edit#heading=h.9gaqqtox9jg6)) diff --git a/docs/tutorials/validating-historical-features.md b/docs/tutorials/validating-historical-features.md index 03baccfbc9e..1984adcdcf9 100644 --- a/docs/tutorials/validating-historical-features.md +++ b/docs/tutorials/validating-historical-features.md @@ -173,7 +173,7 @@ def on_demand_stats(inp: pd.DataFrame) -> pd.DataFrame: return out ``` -*Read more about on demand feature views [here](https://docs.feast.dev/reference/alpha-on-demand-feature-view)* +*Read more about on demand feature views [here](../reference/beta-on-demand-feature-view.md)* ```python diff --git a/examples/README.md b/examples/README.md index e796b5000e3..6dac867be43 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,18 +1,22 @@ # Feast Examples -1. **[Quickstart Example](https://github.com/feast-dev/feast/tree/master/examples/quickstart)**: This is a step-by-step guide for getting started with Feast. - -2. **[Java Demo](https://github.com/feast-dev/feast/tree/master/examples/java-demo)**: Demonstrates how to use Feast with Java feature server and deployed with Kubernetes. - -3. **[Python Helm Demo](https://github.com/feast-dev/feast/tree/master/examples/python-helm-demo)**: Demonstrates Feast with Kubernetes using Helm charts and Python feature server. - -4. **[RBAC Local](https://github.com/feast-dev/feast/tree/master/examples/rbac-local)**: Demonstrates using notebooks how configure and test Role-Based Access Control (RBAC) for securing access in Feast using OIDC authorization type with in a local environment. - -5. 
**[RBAC Remote](https://github.com/feast-dev/feast/tree/master/examples/rbac-local)**: Demonstrates how to configure and test Role-Based Access Control (RBAC) for securing access in Feast using Kubernetes or OIDC Authentication type with in Kubernetes environment. - -6. **[Remote Offline Store](https://github.com/feast-dev/feast/tree/master/examples/remote-offline-store)**: Demonstrates how to set up and use remote offline server. - -7. **[Podman/Podman Compose_local](https://github.com/feast-dev/feast/tree/master/examples/podman_local)**: Demonstrates how to deploy Feast remote server components using Podman Compose locally. - -8. **[RHOAI Feast Demo](https://github.com/feast-dev/feast/tree/master/examples/rhoai-quickstart)**: Showcases Feast's core functionality using a Jupyter notebook, including fetching online feature data from a remote server and retrieving metadata from a remote registry. - +The following examples illustrate various **Feast** use cases to enhance understanding of its functionality. + +1. **[Quickstart Example](quickstart)**: This is a step-by-step guide for getting started with Feast. +1. **[Java Demo](java-demo)**: Demonstrates how to use Feast with Java feature server and deploy it on Kubernetes. +1. **[Kind Quickstart](kind-quickstart)**: Demonstrates how to install and use Feast on Kind with the Helm chart. +1. **[Credit Risk End-to-End](credit-risk-end-to-end)**: Demonstrates an end-to-end credit risk modeling workflow, using Feast to serve feature data for model training and inference. +1. **[Python Helm Demo](python-helm-demo)**: Demonstrates Feast with Kubernetes using Helm charts and Python feature server. +1. **[RBAC Local](rbac-local)**: Shows how to configure and test Role-Based Access Control (RBAC) for securing access in Feast using OIDC authorization in a local environment. +1. 
**[RBAC Remote](rbac-remote)**: Demonstrates how to configure and test Role-Based Access Control (RBAC) for securing access in Feast using Kubernetes or OIDC Authentication in a Kubernetes environment. +1. **[Remote Offline Store](remote-offline-store)**: Demonstrates how to set up and use a remote offline store. +1. **[Podman/Podman Compose Local](podman_local)**: Demonstrates how to deploy Feast remote server components using Podman Compose locally. +1. **[RHOAI Feast Demo](rhoai-quickstart)**: Showcases Feast's core functionality using a Jupyter notebook, including fetching online feature data from a remote server and retrieving metadata from a remote registry. + +# Feast Operator Examples + +The examples below showcase how to deploy and manage **Feast on Kubernetes** using the **Feast Go Operator**. + +1. **[Operator Quickstart](operator-quickstart)**: Demonstrates how to install and use Feast on Kubernetes with the Feast Go Operator. +1. **[Operator Quickstart with Postgres in TLS](operator-postgres-tls-demo)**: Demonstrates installing and configuring Feast with PostgreSQL in TLS mode on Kubernetes using the Feast Go Operator, with an emphasis on volumes and VolumeMounts support. +1. **[Operator RBAC with Kubernetes](operator-rbac)**: Demonstrates the Feast RBAC example on Kubernetes using the Feast Operator. diff --git a/examples/credit-risk-end-to-end/01_Credit_Risk_Data_Prep.ipynb b/examples/credit-risk-end-to-end/01_Credit_Risk_Data_Prep.ipynb new file mode 100644 index 00000000000..a345ec8ca46 --- /dev/null +++ b/examples/credit-risk-end-to-end/01_Credit_Risk_Data_Prep.ipynb @@ -0,0 +1,757 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a52c80c4-1ea2-4d1e-b582-fac51081e76d", + "metadata": {}, + "source": [ + "
" + ] + }, + { + "cell_type": "markdown", + "id": "576a8e30-fe4c-4eda-bc56-9edd7fde3385", + "metadata": {}, + "source": [ + "# Credit Risk Data Preparation" + ] + }, + { + "cell_type": "markdown", + "id": "1f3fbd5a-1587-4b4e-9263-a57490657337", + "metadata": {}, + "source": [ + "Predicting credit risk is an important task for financial institutions. If a bank can accurately determine the probability that a borrower will pay back a future loan, then they can make better decisions on loan terms and approvals. Getting credit risk right is critical to offering good financial services, and getting credit risk wrong could mean going out of business.\n", + "\n", + "AI models have played a central role in modern credit risk assessment systems. In this example, we develop a credit risk model to predict whether a future loan will be good or bad, given some context data (presumably supplied from the loan application). We use the modeling process to demonstrate how Feast can be used to facilitate the serving of data for training and inference use-cases.\n", + "\n", + "In this notebook, we prepare the data." + ] + }, + { + "cell_type": "markdown", + "id": "4d05715f-ddb8-42de-8f0c-212dcbad9e0e", + "metadata": {}, + "source": [ + "### Setup" + ] + }, + { + "cell_type": "markdown", + "id": "6fba29f9-db1f-4ceb-b066-5b2df2c95d33", + "metadata": {}, + "source": [ + "*The following code assumes that you have read the example README.md file, and that you have setup an environment where the code can be run. 
Please make sure you have addressed the prerequisite needs.*" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8a897b19-6f82-4631-ae51-8a23182ff267", + "metadata": {}, + "outputs": [], + "source": [ + "# Import Python libraries\n", + "import os\n", + "import warnings\n", + "import datetime as dt\n", + "import pandas as pd\n", + "import numpy as np\n", + "from sklearn.datasets import fetch_openml" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b944ed48-54b3-43fa-8373-ce788d7e71af", + "metadata": {}, + "outputs": [], + "source": [ + "# suppress warning messages for example flow (don't run if you want to see warnings)\n", + "warnings.filterwarnings('ignore')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "70788c73-144f-4ecf-b370-c5669c538d93", + "metadata": {}, + "outputs": [], + "source": [ + "# Seed for reproducibility\n", + "SEED = 142" + ] + }, + { + "cell_type": "markdown", + "id": "cfb4dfd0-f583-4aa0-bd39-3ff9fbb80db0", + "metadata": {}, + "source": [ + "### Pull the Data" + ] + }, + { + "cell_type": "markdown", + "id": "3c206dfc-d551-4002-ae63-ccbb981768fa", + "metadata": {}, + "source": [ + "The data we will use to train the model is from the [OpenML](https://www.openml.org/) dataset [credit-g](https://www.openml.org/search?type=data&sort=runs&status=active&id=31), obtained from a 1994 German study. More details on the data can be found in the `DESC` attribute and `details` map (see below)." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "31a9e964-bdb3-4ae4-b2b4-64bbe0ab93a3", + "metadata": {}, + "outputs": [], + "source": [ + "data = fetch_openml(name=\"credit-g\", version=1, parser='auto')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "58dbf7c2-f40b-4965-baac-6903a27ef622", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "**Author**: Dr. 
Hans Hofmann \n", + "**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)) - 1994 \n", + "**Please cite**: [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n", + "\n", + "**German Credit dataset** \n", + "This dataset classifies people described by a set of attributes as good or bad credit risks.\n", + "\n", + "This dataset comes with a cost matrix: \n", + "``` \n", + "Good Bad (predicted) \n", + "Good 0 1 (actual) \n", + "Bad 5 0 \n", + "```\n", + "\n", + "It is worse to class a customer as good when they are bad (5), than it is to class a customer as bad when they are good (1). \n", + "\n", + "### Attribute description \n", + "\n", + "1. Status of existing checking account, in Deutsche Mark. \n", + "2. Duration in months \n", + "3. Credit history (credits taken, paid back duly, delays, critical accounts) \n", + "4. Purpose of the credit (car, television,...) \n", + "5. Credit amount \n", + "6. Status of savings account/bonds, in Deutsche Mark. \n", + "7. Present employment, in number of years. \n", + "8. Installment rate in percentage of disposable income \n", + "9. Personal status (married, single,...) and sex \n", + "10. Other debtors / guarantors \n", + "11. Present residence since X years \n", + "12. Property (e.g. real estate) \n", + "13. Age in years \n", + "14. Other installment plans (banks, stores) \n", + "15. Housing (rent, own,...) \n", + "16. Number of existing credits at this bank \n", + "17. Job \n", + "18. Number of people being liable to provide maintenance for \n", + "19. Telephone (yes,no) \n", + "20. 
Foreign worker (yes,no)\n", + "\n", + "Downloaded from openml.org.\n" + ] + } + ], + "source": [ + "print(data.DESCR)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "53de57ec-0fb6-4b51-9c27-696b059a1847", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original data url: https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)\n", + "Paper url: https://dl.acm.org/doi/abs/10.1145/967900.968104\n" + ] + } + ], + "source": [ + "print(\"Original data url: \".ljust(20), data.details[\"original_data_url\"])\n", + "print(\"Paper url: \".ljust(20), data.details[\"paper_url\"])" + ] + }, + { + "cell_type": "markdown", + "id": "6b2c2514-484e-46cb-aedc-89a301266f44", + "metadata": {}, + "source": [ + "### High-Level Data Inspection" + ] + }, + { + "cell_type": "markdown", + "id": "a76af306-caba-403d-a9cb-b5de12573075", + "metadata": {}, + "source": [ + "Let's inspect the data to see high level details like data types and size. We also want to make sure there are no glaring issues (like a large number of null values)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "20fb82c4-ed8d-42f8-b386-c7ebdc9bf786", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RangeIndex: 1000 entries, 0 to 999\n", + "Data columns (total 21 columns):\n", + " # Column Non-Null Count Dtype \n", + "--- ------ -------------- ----- \n", + " 0 checking_status 1000 non-null category\n", + " 1 duration 1000 non-null int64 \n", + " 2 credit_history 1000 non-null category\n", + " 3 purpose 1000 non-null category\n", + " 4 credit_amount 1000 non-null int64 \n", + " 5 savings_status 1000 non-null category\n", + " 6 employment 1000 non-null category\n", + " 7 installment_commitment 1000 non-null int64 \n", + " 8 personal_status 1000 non-null category\n", + " 9 other_parties 1000 non-null category\n", + " 10 residence_since 1000 non-null int64 \n", + " 11 property_magnitude 1000 non-null category\n", + " 12 age 1000 non-null int64 \n", + " 13 other_payment_plans 1000 non-null category\n", + " 14 housing 1000 non-null category\n", + " 15 existing_credits 1000 non-null int64 \n", + " 16 job 1000 non-null category\n", + " 17 num_dependents 1000 non-null int64 \n", + " 18 own_telephone 1000 non-null category\n", + " 19 foreign_worker 1000 non-null category\n", + " 20 class 1000 non-null category\n", + "dtypes: category(14), int64(7)\n", + "memory usage: 71.0 KB\n" + ] + } + ], + "source": [ + "df = data.frame\n", + "df.info()" + ] + }, + { + "cell_type": "markdown", + "id": "a384932a-40df-45f6-bfbc-a9cf6c708f1b", + "metadata": {}, + "source": [ + "We see that there are 21 columns, each with 1000 non-null values. The first 20 columns are contextual fields with `Dtype` of `category` or `int64`, while the last field is actually the target variable, `class`, which we wish to predict. \n", + "\n", + "From the description (above), the `class` tells us whether a loan to a customer was \"good\" or \"bad\". 
We are anticipating that patterns in the contextual data, as well as their relationship to the class outcomes, can give insight into loan classification. In the following notebooks, we will build a loan classification model that seeks to encode these patterns and relationships in its weights, such that given a new loan application (context data), the model can predict whether the loan (if approved) will be good or bad in the future." + ] + }, + { + "cell_type": "markdown", + "id": "a451c9a3-0390-4d5a-b687-c59f52445eb1", + "metadata": {}, + "source": [ + "### Data Preparation For Demonstrating Feast" + ] + }, + { + "cell_type": "markdown", + "id": "dc4e7653-b118-44c3-ade3-f1b217b112fc", + "metadata": {}, + "source": [ + "At this point, it's important to bring up that Feast was developed primarily to work with production data. Feast requires datasets to have entities (in our case, IDs) and timestamps, which it uses in joins. Feast can support joining data on multiple entities (like primary keys in SQL), as well as \"created\" timestamps and \"event\" timestamps. However, in this example, we'll keep things more simple.\n", + "\n", + "In a real loan application scenario, the application fields (in a database) would be associated with a timestamp, while the actual loan outcome (label) would be determined much later and recorded separately with a different timestamp.\n", + "\n", + "In order to demonstrate Feast capabilities, such as point-in-time joins, we will mock IDs and timestamps for this data. For IDs, we will use the original dataframe index values. For the timestamps, we will generate random values between \"Tue Sep 24 12:00:00 2023\" and \"Wed Oct 9 12:00:00 2023\"." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "9d6ec4f6-9410-4858-a440-45dccaa0896b", + "metadata": {}, + "outputs": [], + "source": [ + "# Make index into \"ID\" column\n", + "df = df.reset_index(names=[\"ID\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "055f2cb7-3abf-4d01-be60-e4c7b8ad1988", + "metadata": {}, + "outputs": [], + "source": [ + "# Add mock timestamps\n", + "time_format = \"%a %b %d %H:%M:%S %Y\"\n", + "date = dt.datetime.strptime(\"Wed Oct 9 12:00:00 2023\", time_format)\n", + "end = int(date.timestamp())\n", + "start = int((date - dt.timedelta(days=15)).timestamp()) # 'Tue Sep 24 12:00:00 2023'\n", + "\n", + "def make_tstamp(date):\n", + " dtime = dt.datetime.fromtimestamp(date).ctime()\n", + " return dtime\n", + " \n", + "# (seed set for reproducibility)\n", + "np.random.seed(SEED)\n", + "df[\"application_timestamp\"] = pd.to_datetime([\n", + " make_tstamp(d) for d in np.random.randint(start, end, len(df))\n", + "])" + ] + }, + { + "cell_type": "markdown", + "id": "f7800ea9-de9a-4aab-9d77-c4276e7db5f9", + "metadata": {}, + "source": [ + "Verify that the newly created \"ID\" and \"application_timestamp\" fields were added to the data as expected." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "9516fc5c-7c25-4e60-acba-7400ab6bab42", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
012
ID012
checking_status<00<=X<200no checking
duration64812
credit_historycritical/other existing creditexisting paidcritical/other existing credit
purposeradio/tvradio/tveducation
credit_amount116959512096
savings_statusno known savings<100<100
employment>=71<=X<44<=X<7
installment_commitment422
personal_statusmale singlefemale div/dep/marmale single
other_partiesnonenonenone
residence_since423
property_magnitudereal estatereal estatereal estate
age672249
other_payment_plansnonenonenone
housingownownown
existing_credits211
jobskilledskilledunskilled resident
num_dependents112
own_telephoneyesnonenone
foreign_workeryesyesyes
classgoodbadgood
application_timestamp2023-10-04 17:50:132023-09-28 18:10:132023-10-03 23:06:03
\n", + "
" + ], + "text/plain": [ + " 0 1 \\\n", + "ID 0 1 \n", + "checking_status <0 0<=X<200 \n", + "duration 6 48 \n", + "credit_history critical/other existing credit existing paid \n", + "purpose radio/tv radio/tv \n", + "credit_amount 1169 5951 \n", + "savings_status no known savings <100 \n", + "employment >=7 1<=X<4 \n", + "installment_commitment 4 2 \n", + "personal_status male single female div/dep/mar \n", + "other_parties none none \n", + "residence_since 4 2 \n", + "property_magnitude real estate real estate \n", + "age 67 22 \n", + "other_payment_plans none none \n", + "housing own own \n", + "existing_credits 2 1 \n", + "job skilled skilled \n", + "num_dependents 1 1 \n", + "own_telephone yes none \n", + "foreign_worker yes yes \n", + "class good bad \n", + "application_timestamp 2023-10-04 17:50:13 2023-09-28 18:10:13 \n", + "\n", + " 2 \n", + "ID 2 \n", + "checking_status no checking \n", + "duration 12 \n", + "credit_history critical/other existing credit \n", + "purpose education \n", + "credit_amount 2096 \n", + "savings_status <100 \n", + "employment 4<=X<7 \n", + "installment_commitment 2 \n", + "personal_status male single \n", + "other_parties none \n", + "residence_since 3 \n", + "property_magnitude real estate \n", + "age 49 \n", + "other_payment_plans none \n", + "housing own \n", + "existing_credits 1 \n", + "job unskilled resident \n", + "num_dependents 2 \n", + "own_telephone none \n", + "foreign_worker yes \n", + "class good \n", + "application_timestamp 2023-10-03 23:06:03 " + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Check data (first few records, transposed for readability)\n", + "df.head(3).T" + ] + }, + { + "cell_type": "markdown", + "id": "72b2105a-b459-4715-aa53-6fe69fc4a210", + "metadata": {}, + "source": [ + "We'll also generate counterpart IDs and timestamps on the label data. 
In a real-life scenario, the label data would come separate and later relative to the loan application data. To mimic this, let's create a labels dataset with an \"outcome_timestamp\" column with a variable lag from the application timestamp of 30 to 90 days." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "e214478b-ed9b-4354-ba6f-4117813c56c3", + "metadata": {}, + "outputs": [], + "source": [ + "# Add (lagged) label timestamps (30 to 90 days)\n", + "def lag_delta(data, seed):\n", + " np.random.seed(seed)\n", + " delta_days = np.random.randint(30, 90, len(data))\n", + " delta_hours = np.random.randint(0, 24, len(data))\n", + " delta = np.array([dt.timedelta(days=int(delta_days[i]), hours=int(delta_hours[i])) for i in range(len(data))])\n", + " return delta\n", + "\n", + "labels = df[[\"ID\", \"class\"]]\n", + "labels[\"outcome_timestamp\"] = pd.to_datetime(df.application_timestamp + lag_delta(df, SEED))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "356a7225-db20-4c15-87a3-4a0eb3127475", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDclassoutcome_timestamp
00good2023-11-24 22:50:13
11bad2023-11-03 12:10:13
22good2023-11-30 22:06:03
\n", + "
" + ], + "text/plain": [ + " ID class outcome_timestamp\n", + "0 0 good 2023-11-24 22:50:13\n", + "1 1 bad 2023-11-03 12:10:13\n", + "2 2 good 2023-11-30 22:06:03" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Check labels\n", + "labels.head(3)" + ] + }, + { + "cell_type": "markdown", + "id": "4a29f754-f758-402b-ac42-2dcfcee3b7fc", + "metadata": {}, + "source": [ + "You can verify that the `outcome timestamp` has a difference of 30 to 90 days from the \"application_timestamp\" (above)." + ] + }, + { + "cell_type": "markdown", + "id": "e720ce24-e092-4fcd-be3e-68bb18f4d2a7", + "metadata": {}, + "source": [ + "### Save Data" + ] + }, + { + "cell_type": "markdown", + "id": "5cae0578-8431-46c7-8d64-e52146f47d46", + "metadata": {}, + "source": [ + "Now that we have our data prepared, let's save it to local parquet files in the `data` directory (parquet is one of the file formats supported by Feast).\n", + "\n", + "One more step we will add is splitting the context data column-wise and saving it in two files. This step is contrived--we don't usually split data when we don't need to--but it will allow us to demonstrate later how Feast can easily join datasets (a common need in Data Science projects)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "cebef56c-1f54-4d31-a545-75d708d38579", + "metadata": {}, + "outputs": [], + "source": [ + "# Create the data directory if it doesn't exist\n", + "os.makedirs(\"Feature_Store/data\", exist_ok=True)\n", + "\n", + "# Split columns and save context data\n", + "a_cols = [\n", + " 'ID', 'checking_status', 'duration', 'credit_history', 'purpose',\n", + " 'credit_amount', 'savings_status', 'employment', 'application_timestamp',\n", + " 'installment_commitment', 'personal_status', 'other_parties',\n", + "]\n", + "b_cols = [\n", + " 'ID', 'residence_since', 'property_magnitude', 'age', 'other_payment_plans',\n", + " 'housing', 'existing_credits', 'job', 'num_dependents', 'own_telephone',\n", + " 'foreign_worker', 'application_timestamp'\n", + "]\n", + "\n", + "df[a_cols].to_parquet(\"Feature_Store/data/data_a.parquet\", engine=\"pyarrow\")\n", + "df[b_cols].to_parquet(\"Feature_Store/data/data_b.parquet\", engine=\"pyarrow\")\n", + "\n", + "# Save label data\n", + "labels.to_parquet(\"Feature_Store/data/labels.parquet\", engine=\"pyarrow\")" + ] + }, + { + "cell_type": "markdown", + "id": "d8d5de9f-bd27-4e95-802c-b121743dd1b0", + "metadata": {}, + "source": [ + "We have saved the following files to the `Feature_Store/data` directory: \n", + "- `data_a.parquet` (training data, a columns)\n", + "- `data_b.parquet` (training data, b columns)\n", + "- `labels.parquet` (label outcomes)" + ] + }, + { + "cell_type": "markdown", + "id": "af6355dc-ff5b-4b3f-b0bd-3c4020ef67e8", + "metadata": {}, + "source": [ + "With the feature data prepared, we are ready to setup and deploy the feature store. \n", + "\n", + "Continue with the [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb) notebook." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/credit-risk-end-to-end/02_Deploying_the_Feature_Store.ipynb b/examples/credit-risk-end-to-end/02_Deploying_the_Feature_Store.ipynb new file mode 100644 index 00000000000..f736cdaed93 --- /dev/null +++ b/examples/credit-risk-end-to-end/02_Deploying_the_Feature_Store.ipynb @@ -0,0 +1,801 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "08d9e060-d455-43e2-b1ec-51e2a53e3169", + "metadata": {}, + "source": [ + "
" + ] + }, + { + "cell_type": "markdown", + "id": "93095241-3886-44a2-83b1-2a9537c21bc8", + "metadata": {}, + "source": [ + "# Deploying the Feature Store" + ] + }, + { + "cell_type": "markdown", + "id": "465783da-18eb-4945-98e7-bb1058a7af1b", + "metadata": {}, + "source": [ + "### Introduction" + ] + }, + { + "cell_type": "markdown", + "id": "11961d1b-72db-48dc-a07d-dcea9ba223b4", + "metadata": {}, + "source": [ + "Feast enables AI/ML teams to serve (and consume) features via feature stores. In this notebook, we will configure the feature stores and feature definitions, and deploy a Feast feature store server. We will also materialize (move) data from the offline store to the online store.\n", + "\n", + "In Feast, offline stores support pulling large amounts of data for model training using tools like Redshift, Snowflake, Bigquery, and Spark. In contrast, the focus of Feast online stores is feature serving in support of model inference, using tools like Redis, Snowflake, PostgreSQL, and SQLite.\n", + "\n", + "In this notebook, we will setup a file-based (Dask) offline store and SQLite online store. The online store will be made available through the Feast server." + ] + }, + { + "cell_type": "markdown", + "id": "dfed8ccf-0d7d-46a1-82f0-5765f8796088", + "metadata": {}, + "source": [ + "This notebook assumes that you have prepared the data by running the notebook [01_Credit_Risk_Data_Prep.ipynb](01_Credit_Risk_Data_Prep.ipynb). " + ] + }, + { + "cell_type": "markdown", + "id": "e66b7a08-5d15-4804-a82a-8bc571777496", + "metadata": {}, + "source": [ + "### Setup" + ] + }, + { + "cell_type": "markdown", + "id": "1c1e87a4-900b-48f3-a400-ce6608046ce3", + "metadata": {}, + "source": [ + "*The following code assumes that you have read the example README.md file, and that you have setup an environment where the code can be run. 
Please make sure you have addressed the prerequisite needs.*" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8bd21689-4a8e-4b0c-937d-0911df9db1d3", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import re\n", + "import sys\n", + "import time\n", + "import signal\n", + "import sqlite3\n", + "import subprocess\n", + "import datetime as dt\n", + "from feast import FeatureStore" + ] + }, + { + "cell_type": "markdown", + "id": "471db4b0-ea93-47a1-9d55-a80e4d2bdc1e", + "metadata": {}, + "source": [ + "### Feast Feature Store Configuration" + ] + }, + { + "cell_type": "markdown", + "id": "0a307490-4121-4bf3-a5c4-77a8885a4f6a", + "metadata": {}, + "source": [ + "For model training, we usually don't need (or want) a constantly running feature server. All we need is the ability to efficiently query and pull all of the training data at training time. In contrast, during model serving we need servers that are always ready to supply feature records in response to application requests. \n", + "\n", + "This training-serving dichotomy is reflected in Feast using \"offline\" and \"online\" stores. Offline stores are configured to work with database technologies typically used for training, while online stores are configured to use storage and streaming technologies that are popular for feature serving.\n", + "\n", + "We need to create a `feature_store.yaml` config file to tell feast the structure we want in our offline and online feature stores. Below, we write the configuration for a local \"Dask\" offline store and local SQLite online store. We give the feature store a project name of `loan_applications`, and provider `local`. The registry is where the feature store will keep track of feature definitions and online store updates; we choose a file location in this case.\n", + "\n", + "See the [feature_store.yaml](https://docs.feast.dev/reference/feature-repository/feature-store-yaml) documentation for further details. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b3757221-2037-49eb-867f-b9529fec06e2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing Feature_Store/feature_store.yaml\n" + ] + } + ], + "source": [ + "%%writefile Feature_Store/feature_store.yaml\n", + "\n", + "project: loan_applications\n", + "registry: data/registry.db\n", + "provider: local\n", + "offline_store:\n", + " type: dask\n", + "online_store:\n", + " type: sqlite\n", + " path: data/online_store.db\n", + "entity_key_serialization_version: 2" + ] + }, + { + "cell_type": "markdown", + "id": "180038f3-e5ce-4cce-bdf0-118eee7a822d", + "metadata": {}, + "source": [ + "### Feature Definitions" + ] + }, + { + "cell_type": "markdown", + "id": "dd44b206-1f5c-4f55-bbab-41ba2d3f5202", + "metadata": {}, + "source": [ + "We also need to create feature definitions and other feature constructs in a python file, which we name `feature_definitions.py`. For our purposes, we define the following:\n", + "\n", + "- Data Source: connections to data storage or data-producing endpoints\n", + "- Entity: primary key fields which can be used for joining data\n", + "- FeatureView: collections of features from a data source\n", + "\n", + "For more information on these, see the [Concepts](https://docs.feast.dev/getting-started/concepts) section of the Feast documentation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d3e8fd80-0bee-463c-b3fb-bd0d1ee83a9c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing Feature_Store/feature_definitions.py\n" + ] + } + ], + "source": [ + "%%writefile Feature_Store/feature_definitions.py\n", + "\n", + "# Imports\n", + "import os\n", + "from pathlib import Path\n", + "from feast import (\n", + " FileSource,\n", + " Entity,\n", + " FeatureView,\n", + " Field,\n", + " FeatureService\n", + ")\n", + "from feast.types import Float32, String\n", + "from feast.data_format import ParquetFormat\n", + "\n", + "CURRENT_DIR = os.path.abspath(os.curdir)\n", + "\n", + "# Data Sources\n", + "# A data source tells Feast where the data lives\n", + "data_a = FileSource(\n", + " file_format=ParquetFormat(),\n", + " path=Path(CURRENT_DIR,\"data/data_a.parquet\").as_uri()\n", + ")\n", + "data_b = FileSource(\n", + " file_format=ParquetFormat(),\n", + " path=Path(CURRENT_DIR,\"data/data_b.parquet\").as_uri()\n", + ")\n", + "\n", + "# Entity\n", + "# An entity tells Feast the column it can use to join tables\n", + "loan_id = Entity(\n", + " name = \"loan_id\",\n", + " join_keys = [\"ID\"]\n", + ")\n", + "\n", + "# Feature views\n", + "# A feature view is how Feast groups features\n", + "features_a = FeatureView(\n", + " name=\"data_a\",\n", + " entities=[loan_id],\n", + " schema=[\n", + " Field(name=\"checking_status\", dtype=String),\n", + " Field(name=\"duration\", dtype=Float32),\n", + " Field(name=\"credit_history\", dtype=String),\n", + " Field(name=\"purpose\", dtype=String),\n", + " Field(name=\"credit_amount\", dtype=Float32),\n", + " Field(name=\"savings_status\", dtype=String),\n", + " Field(name=\"employment\", dtype=String),\n", + " Field(name=\"installment_commitment\", dtype=Float32),\n", + " Field(name=\"personal_status\", dtype=String),\n", + " Field(name=\"other_parties\", dtype=String),\n", + " ],\n", + " source=data_a\n", + 
")\n", + "features_b = FeatureView(\n", + " name=\"data_b\",\n", + " entities=[loan_id],\n", + " schema=[\n", + " Field(name=\"residence_since\", dtype=Float32),\n", + " Field(name=\"property_magnitude\", dtype=String),\n", + " Field(name=\"age\", dtype=Float32),\n", + " Field(name=\"other_payment_plans\", dtype=String),\n", + " Field(name=\"housing\", dtype=String),\n", + " Field(name=\"existing_credits\", dtype=Float32),\n", + " Field(name=\"job\", dtype=String),\n", + " Field(name=\"num_dependents\", dtype=Float32),\n", + " Field(name=\"own_telephone\", dtype=String),\n", + " Field(name=\"foreign_worker\", dtype=String),\n", + " ],\n", + " source=data_b\n", + ")\n", + "\n", + "# Feature Service\n", + "# a feature service in Feast represents a logical group of features\n", + "loan_fs = FeatureService(\n", + " name=\"loan_fs\",\n", + " features=[features_a, features_b]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b47c1b5-849e-43f3-8043-60466aaed69f", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "be9723eb-8fa0-4338-b50c-f9f1ff6bb13a", + "metadata": {}, + "source": [ + "### Applying the Configuration and Definitions" + ] + }, + { + "cell_type": "markdown", + "id": "c796d45f-28c0-4875-bbb1-71e5a15dcb96", + "metadata": {}, + "source": [ + "Now that we have our feature store configuration (`feature_store.yaml`) and feature definitions (`feature_definitions.py`), we are ready to \"apply\" them. The `feast apply` command creates a registry file (`Feature_Store/data/registry.db`) and sets up data connections; in this case, it creates a SQLite database (`Feature_Store/data/online_store.db`)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "394467f3-4ced-492a-9379-105aea9d4a6d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "Created entity \u001b[1m\u001b[32mloan_id\u001b[0m\n", + "Created feature view \u001b[1m\u001b[32mdata_a\u001b[0m\n", + "Created feature view \u001b[1m\u001b[32mdata_b\u001b[0m\n", + "Created feature service \u001b[1m\u001b[32mloan_fs\u001b[0m\n", + "\n", + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "10/27/2024 02:19:03 PM root WARNING: Cannot use sqlite_vec for vector search\n", + "Created sqlite table \u001b[1m\u001b[32mloan_applications_data_a\u001b[0m\n", + "Created sqlite table \u001b[1m\u001b[32mloan_applications_data_b\u001b[0m\n", + "\n" + ] + } + ], + "source": [ + "# Run 'feast apply' in the Feature_Store directory\n", + "!feast --chdir ./Feature_Store apply" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e32f40eb-a31a-4877-8f40-2d8515302f39", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total 232\n", + "-rw-r--r-- 1 501 20 33K Oct 27 14:17 data_a.parquet\n", + "-rw-r--r-- 1 501 20 27K Oct 27 14:17 data_b.parquet\n", + "-rw-r--r-- 1 501 20 17K Oct 27 14:17 labels.parquet\n", + "-rw-r--r-- 1 501 20 28K Oct 27 14:19 online_store.db\n", + "-rw-r--r-- 1 501 20 2.8K Oct 27 14:19 registry.db\n" + ] + } + ], + "source": [ + "# List the Feature_Store/data/ directory to see newly created files\n", + "!ls -nlh Feature_Store/data/" + ] + }, + { + "cell_type": "markdown", + "id": "31014885-ce6a-4007-8bdb-d74d3b44781b", 
+ "metadata": {}, + "source": [ + "Note that while `feast apply` set up the `sqlite` online database, `online_store.db`, no data has been added to the online database as of yet. We can verify this by connecting with the `sqlite3` library." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "107ca856-af06-40c4-8339-70daf59cdf37", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Online Store Tables: [('loan_applications_data_a',), ('loan_applications_data_b',)]\n", + "loan_applications_data_a data: []\n", + "loan_applications_data_b data: []\n" + ] + } + ], + "source": [ + "# Connect to sqlite database\n", + "conn = sqlite3.connect(\"Feature_Store/data/online_store.db\")\n", + "cursor = conn.cursor()\n", + "# Query table data (3 tables)\n", + "print(\n", + " \"Online Store Tables: \",\n", + " cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\").fetchall()\n", + ")\n", + "print(\n", + " \"loan_applications_data_a data: \",\n", + " cursor.execute(\"SELECT * FROM loan_applications_data_a\").fetchall()\n", + ")\n", + "print(\n", + " \"loan_applications_data_b data: \",\n", + " cursor.execute(\"SELECT * FROM loan_applications_data_b\").fetchall()\n", + ")\n", + "conn.close()" + ] + }, + { + "cell_type": "markdown", + "id": "03b927ee-7913-4a8a-b17b-9bee361d8d94", + "metadata": {}, + "source": [ + "Since we have used `feast apply` to create the registry, we can now use the Feast Python SDK to interact with our new feature store. To see other possible commands see the [Feast Python SDK documentation](https://rtd.feast.dev/en/master/)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c764a60a-b911-41a8-ba8f-7ef0a0bc7257", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "RepoConfig(project='loan_applications', provider='local', registry_config='data/registry.db', online_config={'type': 'sqlite', 'path': 'data/online_store.db'}, offline_config={'type': 'dask'}, batch_engine_config='local', feature_server=None, flags=None, repo_path=PosixPath('Feature_Store'), entity_key_serialization_version=2, coerce_tz_aware=True)" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Get feature store config\n", + "store = FeatureStore(repo_path=\"./Feature_Store\")\n", + "store.config" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fc572976-6ce9-44f6-8b67-28ee6157e29c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Feature view: data_a | Features: [checking_status-String, duration-Float32, credit_history-String, purpose-String, credit_amount-Float32, savings_status-String, employment-String, installment_commitment-Float32, personal_status-String, other_parties-String]\n", + "Feature view: data_b | Features: [residence_since-Float32, property_magnitude-String, age-Float32, other_payment_plans-String, housing-String, existing_credits-Float32, job-String, num_dependents-Float32, own_telephone-String, foreign_worker-String]\n" + ] + } + ], + "source": [ + "# List feature views\n", + "feature_views = store.list_batch_feature_views()\n", + "for fv in feature_views:\n", + " print(f\"Feature view: {fv.name} | Features: {fv.features}\")" + ] + }, + { + "cell_type": "markdown", + "id": "027edcfe-58d7-4dcb-92e2-5a5514c0f1f0", + "metadata": {}, + "source": [ + "### Deploying the Feature Store Servers" + ] + }, + { + "cell_type": "markdown", + "id": "c9aab68d-395f-421e-ba11-ad8c4acc9d6f", + "metadata": {}, + "source": [ + "If you wish to share a 
feature store with your team, Feast provides feature servers. To spin up an offline feature server process, we can use the `feast serve_offline` command, while to spin up a Feast online feature server, we use the `feast serve` command.\n", + "\n", + "Let's spin up an offline and an online server that we can use in the subsequent notebooks to get features during model training and model serving. We will run both servers as background processes, that we can communicate with in the other notebooks.\n", + "\n", + "First, we write a helper function to extract the first few printed log lines (so we can print it in the notebook cell output)." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "568f81b8-df34-4b06-8a3f-1a6bdc2e6cff", + "metadata": {}, + "outputs": [], + "source": [ + "# TimeoutError class\n", + "class TimeoutError(Exception):\n", + " pass\n", + "\n", + "# TimeoutError raise function\n", + "def timeout():\n", + " raise TimeoutError(\"timeout\")\n", + "\n", + "# Get first few log lines function\n", + "def print_first_proc_lines(proc, wait):\n", + " '''Given a process, `proc`, read and print output lines until they stop \n", + " comming (waiting up to `wait` seconds for new lines to appear)'''\n", + " lines = \"\"\n", + " while True:\n", + " signal.signal(signal.SIGALRM, timeout)\n", + " signal.alarm(wait)\n", + " try:\n", + " lines += proc.stderr.readline()\n", + " except:\n", + " break\n", + " if lines:\n", + " print(lines, file=sys.stderr)" + ] + }, + { + "cell_type": "markdown", + "id": "88d25a87-241a-46c6-9ca7-d035959c5f74", + "metadata": {}, + "source": [ + "Launch the offline server with the command `feast --chdir ./Feature_Store serve_offline`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "ce965dd4-652b-4c36-a064-fd0fd97d3ef7", + "metadata": {}, + "outputs": [], + "source": [ + "# Feast offline server process\n", + "offline_server_proc = subprocess.Popen(\n", + " \"feast --chdir ./Feature_Store serve_offline 2>&2 & echo $! > server_proc.txt\",\n", + " shell=True,\n", + " text=True,\n", + " stdout=subprocess.PIPE,\n", + " stderr=subprocess.PIPE,\n", + " bufsize=0\n", + ")\n", + "print_first_proc_lines(offline_server_proc, 2)" + ] + }, + { + "cell_type": "markdown", + "id": "59958d64-8e68-45ff-9549-556cbf46908c", + "metadata": {}, + "source": [ + "The tail end of the command above, `2>&2 & echo $! > server_proc.txt`, captures log messages (in the offline case there are none), and writes the process PID to the file `server_proc.txt` (we will use this in the cleanup notebook, [05_Credit_Risk_Cleanup.ipynb](05_Credit_Risk_Cleanup.ipynb))." + ] + }, + { + "cell_type": "markdown", + "id": "cfed4334-9e62-4f3f-be96-3f7db2f06ada", + "metadata": {}, + "source": [ + "Next, launch the online server with the command `feast --chdir ./Feature_Store serve`." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "a581fbe2-13ba-433e-8e76-dc82cc22af74", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/ddowler/Code/Feast/feast/examples/credit-risk-end-to-end/venv-py3.11/lib/python3.11/site-packages/uvicorn/workers.py:16: DeprecationWarning: The `uvicorn.workers` module is deprecated. 
Please use `uvicorn-worker` package instead.\n", + "For more details, see https://github.com/Kludex/uvicorn-worker.\n", + " warnings.warn(\n", + "[2024-10-27 14:19:07 -0600] [44621] [INFO] Starting gunicorn 23.0.0\n", + "[2024-10-27 14:19:07 -0600] [44621] [INFO] Listening at: http://127.0.0.1:6566 (44621)\n", + "[2024-10-27 14:19:07 -0600] [44621] [INFO] Using worker: uvicorn.workers.UvicornWorker\n", + "[2024-10-27 14:19:07 -0600] [44623] [INFO] Booting worker with pid: 44623\n", + "[2024-10-27 14:19:07 -0600] [44623] [INFO] Started server process [44623]\n", + "[2024-10-27 14:19:07 -0600] [44623] [INFO] Waiting for application startup.\n", + "[2024-10-27 14:19:07 -0600] [44623] [INFO] Application startup complete.\n", + "\n" + ] + } + ], + "source": [ + "# Feast online server (master and worker) processes\n", + "online_server_proc = subprocess.Popen(\n", + " \"feast --chdir ./Feature_Store serve 2>&2 & echo $! >> server_proc.txt\",\n", + " shell=True,\n", + " text=True,\n", + " stdout=subprocess.PIPE,\n", + " stderr=subprocess.PIPE,\n", + " bufsize=0\n", + ")\n", + "print_first_proc_lines(online_server_proc, 3)" + ] + }, + { + "cell_type": "markdown", + "id": "0e778173-f58a-4074-b63f-107e1f39577b", + "metadata": {}, + "source": [ + "Note that the output helpfully let's us know that the online server is \"Listening at: http://127.0.0.1:6566\" (the default host:port).\n", + "\n", + "List the running processes to verify they are up." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "9b1a224d-884d-45c5-9711-2e2eb4351710", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 501 44594 1 0 2:19PM ?? 0:03.66 **/python **/feast --chdir ./Feature_Store serve_offline\n", + " 501 44621 1 0 2:19PM ?? 0:03.58 **/python **/feast --chdir ./Feature_Store serve\n", + " 501 44623 44621 0 2:19PM ?? 0:00.03 **/python **/feast --chdir ./Feature_Store serve\n", + " 501 44662 44542 0 2:19PM ?? 
0:00.01 /bin/zsh -c ps -ef | grep **/feast | grep serve\n" + ] + } + ], + "source": [ + "# List running Feast processes (paths redacted)\n", + "running_procs = !ps -ef | grep feast | grep serve\n", + "\n", + "for line in running_procs:\n", + " redacted = re.sub(r'/*[^\\s]*(?P(python )|(feast ))', r'**/\\g', line)\n", + " print(redacted)" + ] + }, + { + "cell_type": "markdown", + "id": "fd52eeb4-948c-472b-9111-8549fda955a1", + "metadata": {}, + "source": [ + "Note that there are two process for the online server (master and worker)." + ] + }, + { + "cell_type": "markdown", + "id": "8258e7a8-5f6e-4737-93ee-63591518b169", + "metadata": {}, + "source": [ + "### Materialize Features to the Online Store" + ] + }, + { + "cell_type": "markdown", + "id": "21b354ab-ec22-476d-8fd9-6ffe0f3fbacb", + "metadata": {}, + "source": [ + "At this point, there is no data in the online store yet. Let's use the SDK feature store object (that we created above) to \"materialize\" data; this is Feast lingo for moving/updating data from the offline store to the online store." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "ff6146df-03a7-4ac2-a665-ee5f440c3605", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:_list_feature_views will make breaking changes. Please use _list_batch_feature_views instead. 
_list_feature_views will behave like _list_all_feature_views in the future.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Materializing \u001b[1m\u001b[32m2\u001b[0m feature views from \u001b[1m\u001b[32m2023-09-24 12:00:00-06:00\u001b[0m to \u001b[1m\u001b[32m2024-01-07 12:00:00-07:00\u001b[0m into the \u001b[1m\u001b[32msqlite\u001b[0m online store.\n", + "\n", + "\u001b[1m\u001b[32mdata_a\u001b[0m:\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/1000 [00:00=7\",\"4<=X<7\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"0<=X<200\",\"no checking\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"existing paid\",\"critical/other existing credit\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"female div/dep/mar\",\"male mar/wid\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[12579.0,2463.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"none\",\"none\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"used car\",\"new 
car\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[24.0,24.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[4.0,4.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"yes\",\"yes\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[2.0,3.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[1.0,1.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[44.0,27.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"none\",\"none\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"for free\",\"own\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[1.0,2.0],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"high qualif/self emp/mgmt\",\"skilled\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"no known property\",\"life insurance\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]},{\"values\":[\"yes\",\"yes\"],\"statuses\":[\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"2023-09-25T01:03:47Z\",\"2023-09-29T03:17:24Z\"]}]}']" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response" + ] + }, + { + 
"cell_type": "markdown", + "id": "01d20196-1d42-486d-a0bd-97193c953785", + "metadata": {}, + "source": [ + "The `curl` command gave us a quick validation. In the [04_Credit_Risk_Model_Serving.ipynb](04_Credit_Risk_Model_Serving.ipynb) notebook, we'll use the Python `requests` library to handle the query better." + ] + }, + { + "cell_type": "markdown", + "id": "d74a5117-dd34-4dde-93a8-ea6e8c4c545a", + "metadata": {}, + "source": [ + "Now that the feature stores and their respective servers have been configured and deployed, we can proceed to train an AI model in [03_Credit_Risk_Model_Training.ipynb](03_Credit_Risk_Model_Training.ipynb)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/credit-risk-end-to-end/03_Credit_Risk_Model_Training.ipynb b/examples/credit-risk-end-to-end/03_Credit_Risk_Model_Training.ipynb new file mode 100644 index 00000000000..ca0d0e29d95 --- /dev/null +++ b/examples/credit-risk-end-to-end/03_Credit_Risk_Model_Training.ipynb @@ -0,0 +1,1541 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "54f2ab19-68e1-4725-b6e7-efd8eedebe1a", + "metadata": {}, + "source": [ + "
" + ] + }, + { + "cell_type": "markdown", + "id": "69a40de4-65cf-4b45-b321-2b7ce571f8cb", + "metadata": {}, + "source": [ + "# Credit Risk Model Training" + ] + }, + { + "cell_type": "markdown", + "id": "fe641d83-1e28-4f7f-895c-8ca038f6cc53", + "metadata": {}, + "source": [ + "### Introduction" + ] + }, + { + "cell_type": "markdown", + "id": "8f04f635-401b-47b6-b807-df61d42ec752", + "metadata": {}, + "source": [ + "AI models have played a central role in modern credit risk assessment systems. In this example, we develop a credit risk model to predict whether a future loan will be good or bad, given some context data (presumably supplied from the loan application process). We use the modeling process to demonstrate how Feast can be used to facilitate the serving of data for training and inference use-cases.\n", + "\n", + "In this notebook, we train our AI model. We will use the popular scikit-learn library (sklearn) to train a RandomForestClassifier, as this is a relatively easy choice for a baseline model." + ] + }, + { + "cell_type": "markdown", + "id": "a96bf1aa-c450-4201-83a4-e25b08bdd12d", + "metadata": {}, + "source": [ + "### Setup" + ] + }, + { + "cell_type": "markdown", + "id": "a47b33bc-bc06-4de0-8f3a-beea8179035c", + "metadata": {}, + "source": [ + "*The following code assumes that you have read the example README.md file, and that you have setup an environment where the code can be run. 
Please make sure you have addressed the prerequisite needs.*" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "c66a3dab-fdbf-40be-8227-6180dc314a84", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import warnings\n", + "import datetime\n", + "import feast\n", + "import joblib\n", + "import pandas as pd\n", + "import seaborn as sns\n", + "\n", + "from feast import FeatureStore, RepoConfig\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.preprocessing import OrdinalEncoder\n", + "from sklearn.compose import ColumnTransformer\n", + "from sklearn.pipeline import Pipeline\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.metrics import classification_report" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2a841445-fa47-4826-a874-28ac0e4ea57f", + "metadata": {}, + "outputs": [], + "source": [ + "# Ignore warnings\n", + "warnings.filterwarnings(action=\"ignore\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "23579727-7797-4101-a70d-b0d4c24b0fdf", + "metadata": {}, + "outputs": [], + "source": [ + "# Random seed\n", + "SEED = 142" + ] + }, + { + "cell_type": "markdown", + "id": "fc5be519-7733-449b-8dc3-411e86371315", + "metadata": {}, + "source": [ + "This notebook assumes that you have already done the following:\n", + "\n", + "1. Run the [01_Credit_Risk_Data_Prep.ipynb](01_Credit_Risk_Data_Prep.ipynb) notebook to prepare the data.\n", + "2. Run the [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb) notebook to configure the feature stores and launch the feature store servers.\n", + "\n", + "If you have not completed the above steps, please go back and do so before continuing. This notebook relies on the data prepared by 1, and it uses the Feast offline server stood up by 2." 
+ ] + }, + { + "cell_type": "markdown", + "id": "1ca99047-e508-4b1f-9f4c-f11e38587d70", + "metadata": {}, + "source": [ + "### Load Label (Outcome) Data" + ] + }, + { + "cell_type": "markdown", + "id": "89b49268-b7a5-4abc-8d82-1cdbf9bb4473", + "metadata": {}, + "source": [ + "From our previous data exploration, remember that the label data represents whether the loan was classed as \"good\" (1) or \"bad\" (0). Let's pull the labels for training, as we will use them as our \"entity dataframe\" when pulling features.\n", + "\n", + "This is also a good time to remember that the label timestamps are lagged by 30-90 days from the context data records." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6a227a12-7b3e-462a-8f6e-38a7690df1c4", + "metadata": {}, + "outputs": [], + "source": [ + "labels = pd.read_parquet(\"Feature_Store/data/labels.parquet\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "31a39cad-0a85-4d98-ad95-008c81bb6fe0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDclassoutcome_timestamp
00good2023-11-24 22:50:13
11bad2023-11-03 12:10:13
22good2023-11-30 22:06:03
33good2023-11-17 07:37:19
44bad2023-12-01 05:01:48
\n", + "
" + ], + "text/plain": [ + " ID class outcome_timestamp\n", + "0 0 good 2023-11-24 22:50:13\n", + "1 1 bad 2023-11-03 12:10:13\n", + "2 2 good 2023-11-30 22:06:03\n", + "3 3 good 2023-11-17 07:37:19\n", + "4 4 bad 2023-12-01 05:01:48" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "labels.head()" + ] + }, + { + "cell_type": "markdown", + "id": "857f29fd-46d3-444b-b24f-eaccd82ab7d3", + "metadata": {}, + "source": [ + "### Pull Feature Data from Feast Offline Store" + ] + }, + { + "cell_type": "markdown", + "id": "07c13b69-3d26-484c-97cd-97734cc812bd", + "metadata": {}, + "source": [ + "In order to pull feature data from the offline store, we create a FeatureStore object that connects to the offline server (continuously running in the previous notebook)." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "9e9828f8-f210-4586-ac36-3f7e17f4f1e8", + "metadata": {}, + "outputs": [], + "source": [ + "# Create FeatureStore object\n", + "# (connects to the offline server deployed in 02_Deploying_the_Feature_Store.ipynb) \n", + "store = FeatureStore(config=RepoConfig(\n", + " project=\"loan_applications\",\n", + " provider=\"local\",\n", + " registry=\"Feature_Store/data/registry.db\",\n", + " offline_store={\n", + " \"type\": \"remote\",\n", + " \"host\": \"localhost\",\n", + " \"port\": 8815\n", + " },\n", + " entity_key_serialization_version=2\n", + "))" + ] + }, + { + "cell_type": "markdown", + "id": "c007e7ca-40c1-4850-abed-73b6171ad08d", + "metadata": {}, + "source": [ + "Now, we can retrieve feature data by supplying our entity dataframe and feature specifications to the `get_historical_features` function. 
Note that this function performs a fuzzy lookback (\"point-in-time\") join, matching the lagged outcome timestamp to the closest application timestamp (per ID) in the context data; it also joins the \"a\" and \"b\" features that we had previously split into two tables.\n", + "\n", + "To keep this example simple, we will limit our feature set to the numerical features plus two categorical features." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "dd2e3cb5-c865-48f4-80b6-8a14a1ff09ab", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:_list_feature_views will make breaking changes. Please use _list_batch_feature_views instead. _list_feature_views will behave like _list_all_feature_views in the future.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Using outcome_timestamp as the event timestamp. To specify a column explicitly, please name it event_timestamp.\n" + ] + } + ], + "source": [ + "# Get feature data\n", + "# (Joins a and b data, and selects records with the right timestamps)\n", + "df = store.get_historical_features(\n", + " entity_df=labels,\n", + " features=[\n", + " \"data_a:duration\",\n", + " \"data_a:credit_amount\",\n", + " \"data_a:installment_commitment\",\n", + " \"data_a:checking_status\",\n", + " \"data_b:residence_since\",\n", + " \"data_b:age\",\n", + " \"data_b:existing_credits\",\n", + " \"data_b:num_dependents\",\n", + " \"data_b:housing\"\n", + " ]\n", + ").to_df()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c72f6cb1-bbbf-4512-98cd-0abe5ff0c24b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RangeIndex: 1000 entries, 0 to 999\n", + "Data columns (total 12 columns):\n", + " # Column Non-Null Count Dtype \n", + "--- ------ -------------- ----- \n", + " 0 ID 1000 non-null int64 \n", + " 1 class 1000 non-null category \n", + " 2 outcome_timestamp 
1000 non-null datetime64[ns, UTC]\n", + " 3 duration 1000 non-null int64 \n", + " 4 credit_amount 1000 non-null int64 \n", + " 5 installment_commitment 1000 non-null int64 \n", + " 6 checking_status 1000 non-null category \n", + " 7 residence_since 1000 non-null int64 \n", + " 8 age 1000 non-null int64 \n", + " 9 existing_credits 1000 non-null int64 \n", + " 10 num_dependents 1000 non-null int64 \n", + " 11 housing 1000 non-null category \n", + "dtypes: category(3), datetime64[ns, UTC](1), int64(8)\n", + "memory usage: 73.8 KB\n" + ] + } + ], + "source": [ + "# Check the data info\n", + "df.info()" + ] + }, + { + "cell_type": "markdown", + "id": "110ea48c-0a5a-4642-aaba-a9eeb4a7da48", + "metadata": {}, + "source": [ + "### Split the Data" + ] + }, + { + "cell_type": "markdown", + "id": "f6669dce-a8b0-4d80-9a15-70b7dfd2d718", + "metadata": {}, + "source": [ + "Next, we split the data into a `train` and `validate` set, which we will use to train and then validate a model. The validation set will allow us to more accurately assess the model's performance on data that it has not seen during the training phase." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "036b0a54-48e4-4414-bb8c-0c30b6ab7469", + "metadata": {}, + "outputs": [], + "source": [ + "# Split data into train and validate datasets\n", + "train, validate = train_test_split(df, test_size=0.2, random_state=SEED)" + ] + }, + { + "cell_type": "markdown", + "id": "4b65cbf7-5981-4f51-97aa-a3ff7027f2f3", + "metadata": {}, + "source": [ + "### Exploratory Data Analysis" + ] + }, + { + "cell_type": "markdown", + "id": "e516ded8-10ad-4274-a736-f288290b5883", + "metadata": {}, + "source": [ + "Before building a model, a data scientist needs to gain understanding of the data to make sure it meets important statistical assumptions, and to identify potential opportunities and issues. 
As the purpose of this particular example is to show working with Feast, we will take the view of a data scientist looking to build a quick baseline model to establish some low-end metrics.\n", + "\n", + "Note that this data set is very \"clean\", as it has already been prepared. In real-life, production credit risk data can be much more complex, and have many issues that need to be understood and addressed before modeling." + ] + }, + { + "cell_type": "markdown", + "id": "553986a0-c804-4ab4-a4b9-48b16c72fd4f", + "metadata": {}, + "source": [ + "Let's look at counts for the target variable `class`, which tells us whether a (historical) loan was good or bad. We can see that there were many more good loans than bad, making the dataset imbalanced." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "607bd29b-eaf4-41a6-aaca-a8eaaf37e2d2", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAysElEQVR4nO3deVxV5d7///dmRmWDoIKWqJWKOHZw2k1qkWRkeWvllKlHG8FKy2PcOWLedqwcQ6tTqWWm2WBq5kRZHcVSTFNT1MqwFCgVtnoUBNbvj37sb/ugpQhsvHw9H4/1yHVd11rrc+3d1jdr2Ngsy7IEAABgKC9PFwAAAFCRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIO8AlyGazafz48Z4u45LD6wZcngg7QDmYN2+ebDab21KnTh116dJFn3zyiafLq3T5+fmaNWuWbrjhBtWsWVN+fn6qV6+e7rzzTr3zzjsqKirydInndODAAdlsNr3wwgueLuWCOZ1OTZgwQa1bt1aNGjUUGBioFi1aaNSoUTp06JCny5MkrVy5ksCJSufj6QIAkyQnJ6tRo0ayLEvZ2dmaN2+ebr/9di1fvlx33HGHp8urFL/++qu6deum9PR0xcXFafTo0QoNDVVWVpbWrVunfv36af/+/RozZoynSzXKDz/8oNjYWGVmZuqee+7Rgw8+KD8/P3377bd6/fXX9eGHH2rv3r2eLlMrV65USkoKgQeVirADlKNu3bqpbdu2rvUhQ4YoPDxc77zzzmUTdgYMGKBvvvlG77//vnr27OnWl5SUpC1btigjI8ND1ZmpsLBQPXv2VHZ2ttavX68bbrjBrX/SpEn65z//6aHqAM/jMhZQgUJCQhQYGCgfH/efK1544QVdd911CgsLU2BgoGJiYvTee++V2j4/P1/Dhw9X7dq1FRQUpDvvvFM///zzXx43OztbPj4+mjBhQqm+jIwM2Ww2vfTSS5KkM2fOaMKECWrcuLECAgIUFhamG264QWvXrr3g+aalpWn16tV68MEHSwWdEm
3btlX//v3d2nJyclzBMCAgQK1bt9b8+fNLbXvy5Ek9+eSTql+/vvz9/dW0aVO98MILsizLbVxZX7cLcb41n+97bbPZlJiYqKVLl6pFixby9/dX8+bNtWrVqr+s5f3339f27dv1zDPPlAo6kmS32zVp0iS3tiVLligmJkaBgYGqVauW7rvvPv3yyy9uYzp37qzOnTuX2t+gQYPUsGFD1/ofL/29+uqruvrqq+Xv76927dpp8+bNbtulpKS45luyABWNMztAOcrLy9Nvv/0my7KUk5OjWbNm6cSJE7rvvvvcxs2YMUN33nmn+vfvr4KCAi1atEj33HOPVqxYofj4eNe4oUOHasGCBerXr5+uu+46ffrpp2795xIeHq5OnTrp3Xff1bhx49z6Fi9eLG9vb91zzz2SpPHjx2vy5MkaOnSo2rdvL6fTqS1btmjr1q269dZbL2j+y5cvl6RS8/0zp06dUufOnbV//34lJiaqUaNGWrJkiQYNGqTc3Fw9/vjjkiTLsnTnnXfqs88+05AhQ9SmTRutXr1aI0eO1C+//KJp06a59lnW1628a5bO/72WpH//+9/64IMP9OijjyooKEgzZ85Ur169lJmZqbCwsHPWs2zZMkm/n1U7H/PmzdPgwYPVrl07TZ48WdnZ2ZoxY4Y2bNigb775RiEhIRf+okhauHChjh8/roceekg2m01TpkxRz5499cMPP8jX11cPPfSQDh06pLVr1+qtt94q0zGAMrEAXLS5c+dakkot/v7+1rx580qN/89//uO2XlBQYLVo0cK6+eabXW3btm2zJFmPPvqo29h+/fpZkqxx48b9aU2vvPKKJcnasWOHW3t0dLTbcVq3bm3Fx8ef71T/1P/8z/9Ykqzc3Fy39lOnTlm//vqrazl27Jirb/r06ZYka8GCBa62goICy+FwWDVq1LCcTqdlWZa1dOlSS5L17LPPuu377rvvtmw2m7V//37Lsi7+dfvxxx8tSdbzzz9/zjHnW7Nlnd97bVmWJcny8/NzzcOyLGv79u2WJGvWrFl/WvO1115rBQcH/+mYPx6/Tp06VosWLaxTp0652lesWGFJssaOHetq69Spk9WpU6dS+xg4cKDVoEED13rJaxYWFmYdPXrU1f7RRx9Zkqzly5e72hISEiz+6UFl4zIWUI5SUlK0du1arV27VgsWLFCXLl00dOhQffDBB27jAgMDXX8+duyY8vLydOONN2rr1q2u9pUrV0qSHnvsMbdtn3jiifOqpWfPnvLx8dHixYtdbTt37tR3332n3r17u9pCQkK0a9cu7du377zneS5Op1OSVKNGDbf2l19+WbVr13Ytf7zUsnLlSkVERKhv376uNl9fXz322GM6ceKEPv/8c9c4b2/vUq/Hk08+KcuyXE+9Xezrdj7Ot2bp/N7rErGxsbr66qtd661atZLdbtcPP/zwp/U4nU4FBQWdV+1btmxRTk6OHn30UQUEBLja4+PjFRUVpY8//vi89nM2vXv3Vs2aNV3rN954oyT9Zf1ARSPsAOWoffv2io2NVWxsrPr376+PP/5Y0dHRSkxMVEFBgWvcihUr1LFjRwUEBCg0NFS1a9fWnDlzlJeX5xrz008/ycvLy+0fP0lq2rTpedVSq1Yt3XLLLXr33XddbYsXL5aPj4/b/TTJycnKzc1VkyZN1LJlS40cOVLffvttmeZf8g/uiRMn3Np79erlCoGtWrVy6/vpp5/UuHFjeXm5/3XUrFkzV3/Jf+vVq1fqH/WzjbuY1+18nG/N0vm91yUiIyNLtdWsWVPHjh3703rsdruOHz9+3rVLZ389oqKi3Gq/UP9df0nw+av6gYpG2AEqkJeXl7p06aLDhw+7zpx8+eWXuvPOOxUQEKDZs2dr5cqVWrt2rfr161fqRtuL1adPH+3du1fbtm2TJL377ru65ZZbVKtWLdeYm266Sd9//73eeOMNtWjRQq+99pr+9re/6bXXXrvg40VFRUn6/QzSH9WvX9
8VAv/4k7/pLvS99vb2Put+/ur/i6ioKOXl5engwYPlUneJc908fK7vSSpr/UBFI+wAFaywsFDS/zvb8f777ysgIECrV6/W3//+d3Xr1k2xsbGltmvQoIGKi4v1/fffu7VfyGPbPXr0kJ+fnxYvXqxt27Zp79696tOnT6lxoaGhGjx4sN555x0dPHhQrVq1KtP3oJQ8Xv/222+f9zYNGjTQvn37VFxc7Na+Z88eV3/Jfw8dOlTqDMbZxl3s61ZeNZ/ve32xunfvLklasGDBX44tqe1sr0dGRoarX/r9zExubm6pcRdz9oenr+AJhB2gAp05c0Zr1qyRn5+f6xKHt7e3bDab20/HBw4c0NKlS9227datmyRp5syZbu3Tp08/7+OHhIQoLi5O7777rhYtWiQ/Pz/16NHDbcyRI0fc1mvUqKFrrrlG+fn5rra8vDzt2bPnrJde/uj666/XrbfeqldffVUfffTRWcf890/5t99+u7KystzuLSosLNSsWbNUo0YNderUyTWuqKjI9ch8iWnTpslms7ler/J43f7K+dZ8vu/1xbr77rvVsmVLTZo0SWlpaaX6jx8/rmeeeUbS74/+16lTRy+//LLbe/zJJ59o9+7dbk+IXX311dqzZ49+/fVXV9v27du1YcOGMtdavXp1STpriAIqCo+eA+Xok08+cf10n5OTo4ULF2rfvn16+umnZbfbJf1+I+jUqVN12223qV+/fsrJyVFKSoquueYat3tl2rRpo759+2r27NnKy8vTddddp9TUVO3fv/+Caurdu7fuu+8+zZ49W3FxcaUeK46Ojlbnzp0VExOj0NBQbdmyRe+9954SExNdYz788EMNHjxYc+fO1aBBg/70eAsWLNBtt92mHj16uM5k1KxZ0/UNyl988YUrkEjSgw8+qFdeeUWDBg1Senq6GjZsqPfee08bNmzQ9OnTXffodO/eXV26dNEzzzyjAwcOqHXr1lqzZo0++ugjPfHEE657dMrrdUtNTdXp06dLtffo0eO8az7f9/pi+fr66oMPPlBsbKxuuukm3Xvvvbr++uvl6+urXbt2aeHChapZs6YmTZokX19f/fOf/9TgwYPVqVMn9e3b1/XoecOGDTV8+HDXfv/+979r6tSpiouL05AhQ5STk6OXX35ZzZs3d92MfqFiYmIk/X4DeVxcnLy9vc96thEoV558FAwwxdkePQ8ICLDatGljzZkzxyouLnYb//rrr1uNGze2/P39raioKGvu3LnWuHHjSj2Se+rUKeuxxx6zwsLCrOrVq1vdu3e3Dh48eF6PUJdwOp1WYGBgqUelSzz77LNW+/btrZCQECswMNCKioqyJk2aZBUUFJSa39y5c8/rmKdOnbKmT59uORwOy263Wz4+PlZERIR1xx13WG+//bZVWFjoNj47O9saPHiwVatWLcvPz89q2bLlWY91/Phxa/jw4Va9evUsX19fq3Hjxtbzzz9f6vW9mNet5DHqcy1vvfXWBdV8vu+1JCshIaHU9g0aNLAGDhz4pzWXOHbsmDV27FirZcuWVrVq1ayAgACrRYsWVlJSknX48GG3sYsXL7auvfZay9/f3woNDbX69+9v/fzzz6X2uWDBAuuqq66y/Pz8rDZt2lirV68+56PnZ3tc/79f88LCQmvYsGFW7dq1LZvNxmPoqBQ2y+LOMQAAYC7u2QEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphR79/o6vT6eT3twAAYCDCjn7/KvXg4ODz/q3BAADg0kHYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNF8PF0AAFSW02eKlHn0P54uA7gsRIZWU4Cvt6fLkETYAXAZyTz6H439aKenywAuC8l3tVCT8C
BPlyGJy1gAAMBwhB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDSPhp3x48fLZrO5LVFRUa7+06dPKyEhQWFhYapRo4Z69eql7Oxst31kZmYqPj5e1apVU506dTRy5EgVFhZW9lQAAEAV5ePpApo3b65169a51n18/l9Jw4cP18cff6wlS5YoODhYiYmJ6tmzpzZs2CBJKioqUnx8vCIiIrRx40YdPnxY999/v3x9ffV///d/lT4XAABQ9Xg87Pj4+CgiIqJUe15enl5//XUtXLhQN998syRp7ty5atasmTZt2qSOHTtqzZo1+u6777Ru3TqFh4erTZs2mjhxokaNGqXx48fLz8+vsqcDAACqGI/fs7Nv3z7Vq1dPV111lfr376/MzExJUnp6us6cOaPY2FjX2KioKEVGRiotLU2SlJaWppYtWyo8PNw1Ji4uTk6nU7t27TrnMfPz8+V0Ot0WAABgJo+GnQ4dOmjevHlatWqV5syZox9//FE33nijjh8/rqysLPn5+SkkJMRtm/DwcGVlZUmSsrKy3IJOSX9J37lMnjxZwcHBrqV+/frlOzEAAFBlePQyVrdu3Vx/btWqlTp06KAGDRro3XffVWBgYIUdNykpSSNGjHCtO51OAg8AAIby+GWsPwoJCVGTJk20f/9+RUREqKCgQLm5uW5jsrOzXff4RERElHo6q2T9bPcBlfD395fdbndbAACAmapU2Dlx4oS+//571a1bVzExMfL19VVqaqqrPyMjQ5mZmXI4HJIkh8OhHTt2KCcnxzVm7dq1stvtio6OrvT6AQBA1ePRy1hPPfWUunfvrgYNGujQoUMaN26cvL291bdvXwUHB2vIkCEaMWKEQkNDZbfbNWzYMDkcDnXs2FGS1LVrV0VHR2vAgAGaMmWKsrKyNHr0aCUkJMjf39+TUwMAAFWER8POzz//rL59++rIkSOqXbu2brjhBm3atEm1a9eWJE2bNk1eXl7q1auX8vPzFRcXp9mzZ7u29/b21ooVK/TII4/I4XCoevXqGjhwoJKTkz01JQAAUMXYLMuyPF2EpzmdTgUHBysvL4/7dwCD7c0+rrEf7fR0GcBlIfmuFmoSHuTpMiRVsXt2AAAAyhthBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjFZlws5zzz0nm82mJ554wtV2+vRpJSQkKCwsTDVq1FCvXr2UnZ3ttl1mZqbi4+NVrVo11alTRyNHjlRhYWElVw8AAKqqKhF2Nm/erFdeeUWtWrVyax8+fLiWL1+uJUuW6PPPP9ehQ4fUs2dPV39RUZHi4+NVUFCgjRs3av78+Zo3b57Gjh1b2VMAAABVlMfDzokTJ9S/f3/961//Us2aNV3teXl5ev311zV16lTdfPPNiomJ0dy5c7Vx40Zt2rRJkrRmzRp999
13WrBggdq0aaNu3bpp4sSJSklJUUFBgaemBAAAqhCPh52EhATFx8crNjbWrT09PV1nzpxxa4+KilJkZKTS0tIkSWlpaWrZsqXCw8NdY+Li4uR0OrVr165zHjM/P19Op9NtAQAAZvLx5MEXLVqkrVu3avPmzaX6srKy5Ofnp5CQELf28PBwZWVlucb8MeiU9Jf0ncvkyZM1YcKEi6weAABcCjx2ZufgwYN6/PHH9fbbbysgIKBSj52UlKS8vDzXcvDgwUo9PgAAqDweCzvp6enKycnR3/72N/n4+MjHx0eff/65Zs6cKR8fH4WHh6ugoEC5ublu22VnZysiIkKSFBERUerprJL1kjFn4+/vL7vd7rYAAAAzeSzs3HLLLdqxY4e2bdvmWtq2bav+/fu7/uzr66vU1FTXNhkZGcrMzJTD4ZAkORwO7dixQzk5Oa4xa9euld1uV3R0dKXPCQAAVD0eu2cnKChILVq0cGurXr26wsLCXO1DhgzRiBEjFBoaKrvdrmHDhsnhcKhjx46SpK5duyo6OloDBgzQlClTlJWVpdGjRyshIUH+/v6VPicAAFD1ePQG5b8ybdo0eXl5qVevXsrPz1dcXJxmz57t6vf29taKFSv0yCOPyOFwqHr16ho4cKCSk5M9WDUAAKhKbJZlWZ4uwtOcTqeCg4OVl5fH/TuAwfZmH9fYj3Z6ugzgspB8Vws1CQ/ydBmSqsD37AAAAFQkwg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAo5Up7Fx11VU6cuRIqfbc3FxdddVVF10UAABAeSlT2Dlw4ICKiopKtefn5+uXX3656KIAAADKi8+FDF62bJnrz6tXr1ZwcLBrvaioSKmpqWrYsGG5FQcAAHCxLijs9OjRQ5Jks9k0cOBAtz5fX181bNhQL774YrkVBwAAcLEuKOwUFxdLkho1aqTNmzerVq1aFVIUAABAebmgsFPixx9/LO86AAAAKkSZwo4kpaamKjU1VTk5Oa4zPiXeeOONiy4MAACgPJQp7EyYMEHJyclq27at6tatK5vNVt51AQAAlIsyhZ2XX35Z8+bN04ABA8q7HgAAgHJVpu/ZKSgo0HXXXVfetQAAAJS7MoWdoUOHauHCheVdCwAAQLkr02Ws06dP69VXX9W6devUqlUr+fr6uvVPnTq1XIoDAAC4WGUKO99++63atGkjSdq5c6dbHzcrAwCAqqRMl7E+++yzcy6ffvrpee9nzpw5atWqlex2u+x2uxwOhz755BNX/+nTp5WQkKCwsDDVqFFDvXr1UnZ2tts+MjMzFR8fr2rVqqlOnToaOXKkCgsLyzItAABgoDKFnfJy5ZVX6rnnnlN6erq2bNmim2++WXfddZd27dolSRo+fLiWL1+uJUuW6PPPP9ehQ4fUs2dP1/ZFRUWKj49XQUGBNm7cqPnz52vevHkaO3asp6YEAACqGJtlWdaFbtSlS5c/vVx1IWd3/ltoaKief/553X333apdu7YWLlyou+++W5K0Z88eNWvWTGlpaerYsaM++eQT3XHHHTp06JDCw8Ml/f5Y/KhRo/Trr7/Kz8/vvI7pdDoVHBysvLw82e32MtcOoGrbm31cYz/a+dcDAVy05LtaqEl4kKfLkFTGMztt2rRR69atXUt0dLQKCgq0detWtWzZskyFFBUVadGiRTp58qQcDofS09N15swZxcbGusZERUUpMjJSaWlpkqS0tDS1bNnSFXQkKS4uTk6n03
V26Gzy8/PldDrdFgAAYKYy3aA8bdq0s7aPHz9eJ06cuKB97dixQw6HQ6dPn1aNGjX04YcfKjo6Wtu2bZOfn59CQkLcxoeHhysrK0uSlJWV5RZ0SvpL+s5l8uTJmjBhwgXVCQAALk3les/Offfdd8G/F6tp06batm2bvvrqKz3yyCMaOHCgvvvuu/Isq5SkpCTl5eW5loMHD1bo8QAAgOeU+ReBnk1aWpoCAgIuaBs/Pz9dc801kqSYmBht3rxZM2bMUO/evVVQUKDc3Fy3szvZ2dmKiIiQJEVEROjrr79221/J01olY87G399f/v7+F1QnAAC4NJUp7PzxiShJsixLhw8f1pYtWzRmzJiLKqi4uFj5+fmKiYmRr6+vUlNT1atXL0lSRkaGMjMz5XA4JEkOh0OTJk1STk6O6tSpI0lau3at7Ha7oqOjL6oOAABghjKFneDgYLd1Ly8vNW3aVMnJyeratet57ycpKUndunVTZGSkjh8/roULF2r9+vVavXq1goODNWTIEI0YMUKhoaGy2+0aNmyYHA6HOnbsKEnq2rWroqOjNWDAAE2ZMkVZWVkaPXq0EhISOHMDAAAklTHszJ07t1wOnpOTo/vvv1+HDx9WcHCwWrVqpdWrV+vWW2+V9PuN0F5eXurVq5fy8/MVFxen2bNnu7b39vbWihUr9Mgjj8jhcKh69eoaOHCgkpOTy6U+AABw6SvT9+yUSE9P1+7duyVJzZs317XXXltuhVUmvmcHuDzwPTtA5alK37NTpjM7OTk56tOnj9avX++6eTg3N1ddunTRokWLVLt27fKsEQAAoMzK9Oj5sGHDdPz4ce3atUtHjx7V0aNHtXPnTjmdTj322GPlXSMAAECZlenMzqpVq7Ru3To1a9bM1RYdHa2UlJQLukEZAACgopXpzE5xcbF8fX1Ltfv6+qq4uPiiiwIAACgvZQo7N998sx5//HEdOnTI1fbLL79o+PDhuuWWW8qtOAAAgItVprDz0ksvyel0qmHDhrr66qt19dVXq1GjRnI6nZo1a1Z51wgAAFBmZbpnp379+tq6davWrVunPXv2SJKaNWvm9hvKAQAAqoILOrPz6aefKjo6Wk6nUzabTbfeequGDRumYcOGqV27dmrevLm+/PLLiqoVAADggl1Q2Jk+fboeeOCBs37xXnBwsB566CFNnTq13IoDAAC4WBcUdrZv367bbrvtnP1du3ZVenr6RRcFAABQXi4o7GRnZ5/1kfMSPj4++vXXXy+6KAAAgPJyQWHniiuu0M6d5/69Mt9++63q1q170UUBAACUlwsKO7fffrvGjBmj06dPl+o7deqUxo0bpzvuuKPcigMAALhYF/To+ejRo/XBBx+oSZMmSkxMVNOmTSVJe/bsUUpKioqKivTMM89USKEAAABlcUFhJzw8XBs3btQjjzyipKQkWZYlSbLZbIqLi1NKSorCw8MrpFAAAICyuOAvFWzQoIFWrlypY8eOaf/+/bIsS40bN1bNmjUroj4AAICLUqZvUJakmjVrql27duVZCwAAQLkr0+/GAgAAuFQQdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0H08XcLk4faZImUf/4+kygMvGNbVryMvL5ukyAFQBhJ1Kknn0Pxr70U5PlwFcNuYOaq9AP29PlwGgCuAyFgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACM5tGwM3nyZLVr105BQUGqU6eOevTooYyMDLcxp0+fVkJCgsLCwlSjRg316tVL2dnZbmMyMzMVHx+vatWqqU6dOho5cqQKCwsrcyoAAKCK8mjY+fzzz5WQkKBNmzZp7dq1On
PmjLp27aqTJ0+6xgwfPlzLly/XkiVL9Pnnn+vQoUPq2bOnq7+oqEjx8fEqKCjQxo0bNX/+fM2bN09jx471xJQAAEAV49Hfer5q1Sq39Xnz5qlOnTpKT0/XTTfdpLy8PL3++utauHChbr75ZknS3Llz1axZM23atEkdO3bUmjVr9N1332ndunUKDw9XmzZtNHHiRI0aNUrjx4+Xn5+fJ6YGAACqiCp1z05eXp4kKTQ0VJKUnp6uM2fOKDY21jUmKipKkZGRSktLkySlpaWpZcuWCg8Pd42Ji4uT0+nUrl27KrF6AABQFXn0zM4fFRcX64knntD111+vFi1aSJKysrLk5+enkJAQt7Hh4eHKyspyjflj0CnpL+k7m/z8fOXn57vWnU5neU0DAABUMVXmzE5CQoJ27typRYsWVfixJk+erODgYNdSv379Cj8mAADwjCoRdhITE7VixQp99tlnuvLKK13tERERKigoUG5urtv47OxsRUREuMb899NZJeslY/5bUlKS8vLyXMvBgwfLcTYAAKAq8WjYsSxLiYmJ+vDDD/Xpp5+qUaNGbv0xMTHy9fVVamqqqy0jI0OZmZlyOBySJIfDoR07dignJ8c1Zu3atbLb7YqOjj7rcf39/WW3290WAABgJo/es5OQkKCFCxfqo48+UlBQkOsem+DgYAUGBio4OFhDhgzRiBEjFBoaKrvdrmHDhsnhcKhjx46SpK5duyo6OloDBgzQlClTlJWVpdGjRyshIUH+/v6enB4AAKgCPBp25syZI0nq3LmzW/vcuXM1aNAgSdK0adPk5eWlXr16KT8/X3FxcZo9e7ZrrLe3t1asWKFHHnlEDodD1atX18CBA5WcnFxZ0wAAAFWYR8OOZVl/OSYgIEApKSlKSUk555gGDRpo5cqV5VkaAAAwRJW4QRkAAKCiEHYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBoHg07X3zxhbp376569erJZrNp6dKlbv2WZWns2LGqW7euAgMDFRsbq3379rmNOXr0qPr37y+73a6QkBANGTJEJ06cqMRZAACAqsyjYefkyZNq3bq1UlJSzto/ZcoUzZw5Uy+//LK++uorVa9eXXFxcTp9+rRrTP/+/bVr1y6tXbtWK1as0BdffKEHH3ywsqYAAACqOB9PHrxbt27q1q3bWfssy9L06dM1evRo3XXXXZKkN998U+Hh4Vq6dKn69Omj3bt3a9WqVdq8ebPatm0rSZo1a5Zuv/12vfDCC6pXr16lzQUAAFRNVfaenR9//FFZWVmKjY11tQUHB6tDhw5KS0uTJKWlpSkkJMQVdCQpNjZWXl5e+uqrryq9ZgAAUPV49MzOn8nKypIkhYeHu7WHh4e7+rKyslSnTh23fh8fH4WGhrrGnE1+fr7y8/Nd606ns7zKBgAAVUyVPbNTkSZPnqzg4GDXUr9+fU+XBAAAKkiVDTsRERGSpOzsbLf27OxsV19ERIRycnLc+gsLC3X06FHXmLNJSkpSXl6eazl48GA5Vw8AAKqKKht2GjVqpIiICKWmprranE6nvvrqKzkcDkmSw+FQbm6u0tPTXWM+/fRTFRcXq0OHDufct7+/v+x2u9sCAADM5NF7dk6cOKH9+/e71n/88Udt27ZNoa
GhioyM1BNPPKFnn31WjRs3VqNGjTRmzBjVq1dPPXr0kCQ1a9ZMt912mx544AG9/PLLOnPmjBITE9WnTx+exAIAAJI8HHa2bNmiLl26uNZHjBghSRo4cKDmzZunf/zjHzp58qQefPBB5ebm6oYbbtCqVasUEBDg2ubtt99WYmKibrnlFnl5ealXr16aOXNmpc8FAABUTR4NO507d5ZlWefst9lsSk5OVnJy8jnHhIaGauHChRVRHgAAMECVvWcHAACgPBB2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBohB0AAGA0wg4AADAaYQcAABiNsAMAAIxG2AEAAEYj7AAAAKMRdgAAgNEIOwAAwGiEHQAAYDTCDgAAMBphBwAAGI2wAwAAjEbYAQAARiPsAAAAoxF2AACA0Qg7AADAaMaEnZSUFDVs2FABAQHq0KGDvv76a0+XBAAAqgAjws7ixYs1YsQIjRs3Tlu3blXr1q0VFxennJwcT5cGAAA8zGZZluXpIi5Whw4d1K5dO7300kuSpOLiYtWvX1/Dhg3T008//ZfbO51OBQcHKy8vT3a7vUJqPH2mSJlH/1Mh+wZQ2jW1a8jLy+bWxucQqDyRodUU4Ovt6TIkST6eLuBiFRQUKD09XUlJSa42Ly8vxcbGKi0tzYOVuQvw9VaT8CBPlwFc1vgcApenSz7s/PbbbyoqKlJ4eLhbe3h4uPbs2XPWbfLz85Wfn+9az8vLk/T7GR4AAHBpCQoKks1mO2f/JR92ymLy5MmaMGFCqfb69et7oBoAAHAx/uo2lEs+7NSqVUve3t7Kzs52a8/OzlZERMRZt0lKStKIESNc68XFxTp69KjCwsL+NBni8uN0OlW/fn0dPHiwwu7nAnBufAZxPoKC/vzy9CUfdvz8/BQTE6PU1FT16NFD0u/hJTU1VYmJiWfdxt/fX/7+/m5tISEhFVwpLmV2u52/aAEP4jOIi3HJhx1JGjFihAYOHKi2bduqffv2mj59uk6ePKnBgwd7ujQAAOBhRoSd3r1769dff9XYsWOVlZWlNm3aaNWqVaVuWgYAAJcfI8KOJCUmJp7zshVQVv7+/ho3blypy54AKgefQZQHI75UEAAA4FyM+HURAAAA50LYAQAARiPsAAAAoxF2cNnp3LmznnjiiXLd5/r162Wz2ZSbm1uu+wVwcRo2bKjp06d7ugx4GGEHAAAYjbADAACMRtjBZamwsFCJiYkKDg5WrVq1NGbMGJV8C8Nbb72ltm3bKigoSBEREerXr59ycnLctl+5cqWaNGmiwMBAdenSRQcOHPDALIBLx/Hjx9W/f39Vr15ddevW1bRp09wuKR87dkz333+/atasqWrVqqlbt27at2+f2z7ef/99NW/eXP7+/mrYsKFefPFFt/6cnBx1795dgYGBatSokd5+++3Kmh6qOMIOLkvz58+Xj4+Pvv76a82YMUNTp07Va6+9Jkk6c+aMJk6cqO3bt2vp0qU6cOCABg0a5Nr24MGD6tmzp7p3765t27Zp6NChevrppz00E+DSMGLECG3YsEHLli3T2rVr9eWXX2rr1q2u/kGDBmnLli1atmyZ0tLSZFmWbr/9dp05c0aSlJ6ernvvvVd9+vTRjh07NH78eI0ZM0bz5s1z28fBgwf12Wef6b333tPs2bNL/aCCy5QFXGY6depkNWvWzCouLna1jRo1ymrWrNlZx2/evNmSZB0/ftyyLMtKSkqyoqOj3caMGjXKkmQdO3aswuoGLlVOp9Py9fW1li
xZ4mrLzc21qlWrZj3++OPW3r17LUnWhg0bXP2//fabFRgYaL377ruWZVlWv379rFtvvdVtvyNHjnR9FjMyMixJ1tdff+3q3717tyXJmjZtWgXODpcCzuzgstSxY0fZbDbXusPh0L59+1RUVKT09HR1795dkZGRCgoKUqdOnSRJmZmZkqTdu3erQ4cObvtzOByVVzxwifnhhx905swZtW/f3tUWHByspk2bSvr9M+Xj4+P2uQoLC1PTpk21e/du15jrr7/ebb/XX3+963Nbso+YmBhXf1RUlEJCQipwZrhUEHaAPzh9+rTi4uJkt9v19ttva/Pmzfrwww8lSQUFBR6uDgBQFoQdXJa++uort/VNmzapcePG2rNnj44cOaLnnntON954o6Kiokpd82/WrJm+/vrrUtsDOLurrrpKvr6+2rx5s6stLy9Pe/fulfT7Z6qwsNDtc3nkyBFlZGQoOjraNWbDhg1u+92wYYOaNGkib29vRUVFqbCwUOnp6a7+jIwMvvsKkgg7uExlZmZqxIgRysjI0DvvvKNZs2bp8ccfV2RkpPz8/DRr1iz98MMPWrZsmSZOnOi27cMPP6x9+/Zp5MiRysjI0MKFC91ukgTgLigoSAMHDtTIkSP12WefadeuXRoyZIi8vLxks9nUuHFj3XXXXXrggQf073//W9u3b9d9992nK664QnfddZck6cknn1RqaqomTpyovXv3av78+XrppZf01FNPSZKaNm2q2267TQ899JC++uorpaena+jQoQoMDPTk1FFVePqmIaCyderUyXr00Uethx9+2LLb7VbNmjWt//3f/3XdsLxw4UKrYcOGlr+/v+VwOKxly5ZZkqxvvvnGtY/ly5db11xzjeXv72/deOON1htvvMENysCfcDqdVr9+/axq1apZERER1tSpU6327dtbTz/9tGVZlnX06FFrwIABVnBwsBUYGGjFxcVZe/fuddvHe++9Z0VHR1u+vr5WZGSk9fzzz7v1Hz582IqPj7f8/f2tyMhI680337QaNGjADcqwbJb1/3+5CAAAleTkyZO64oor9OKLL2rIkCGeLgeG8/F0AQAA833zzTfas2eP2rdvr7y8PCUnJ0uS6zIVUJEIOwCASvHCCy8oIyNDfn5+iomJ0ZdffqlatWp5uixcBriMBQAAjMbTWAAAwGiEHQAAYDTCDgAAMBphBwAAGI2wA+CSdeDAAdlsNm3bts3TpQCowgg7AADAaIQdAABgNMIOgCqvuLhYU6ZM0TXXXCN/f39FRkZq0qRJpcYVFRVpyJAhatSokQIDA9W0aVPNmDHDbcz69evVvn17Va9eXSEhIbr++uv1008/SZK2b9+uLl26KCgoSHa7XTExMdqyZUulzBFAxeEblAFUeUlJSfrXv/6ladOm6YYbbtDhw4e1Z8+eUuOKi4t15ZVXasmSJQoLC9PGjRv14IMPqm7durr33ntVWFioHj166IEHHtA777yjgoICff3117LZbJKk/v3769prr9WcOXPk7e2tbdu2ydfXt7KnC6Cc8Q3KAKq048ePq3bt2nrppZc0dOhQt74DBw6oUaNG+uabb9SmTZuzbp+YmKisrCy99957Onr0qMLCwrR+/Xp16tSp1Fi73a5Zs2Zp4MCBFTEVAB7CZSwAVdru3buVn5+vW2655bzGp6SkKCYmRrVr11aNGjX06quvKjMzU5IUGhqqQYMGKS4uTt27d9eMGTN0+PBh17YjRozQ0KFDFRsbq+eee07ff/99hcwJQOUi7ACo0gIDA8977KJFi/TUU09pyJAhWrNmjbZt26bBgweroKDANWbu3LlKS0vTddddp8WLF6tJkybatGmTJGn8+PHatWuX4uPj9emnnyo6Oloffvhhuc8JQOXiMhaAKu306dMKDQ3VzJkz//Iy1rBhw/Tdd98pNTXVNSY2Nla//fbbOb+Lx+FwqF27dpo5c2apvr59++rkyZNatmxZuc4JQOXizA6AKi0gIECjRo
3SP/7xD7355pv6/vvvtWnTJr3++uulxjZu3FhbtmzR6tWrtXfvXo0ZM0abN2929f/4449KSkpSWlqafvrpJ61Zs0b79u1Ts2bNdOrUKSUmJmr9+vX66aeftGHDBm3evFnNmjWrzOkCqAA8jQWgyhszZox8fHw0duxYHTp0SHXr1tXDDz9catxDDz2kb775Rr1795bNZlPfvn316KOP6pNPPpEkVatWTXv27NH8+fN15MgR1a1bVwkJCXrooYdUWFioI0eO6P7771d2drZq1aqlnj17asKECZU9XQDljMtYAADAaFzGAgAARiPsAAAAoxF2AACA0Qg7AADAaIQdAABgNMIOAAAwGmEHAAAYjbADAACMRtgBAABGI+wAAACjEXYAAIDRCDsAAMBo/x9dWkm/NZ32CAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# plot the target variable \"class\"\n", + "p = sns.histplot(train[\"class\"], ec=\"w\", lw=4)\n", + "_ = p.set_title(\"Bad vs. Good Loan Count\")\n", + "_ = p.spines[\"top\"].set_visible(False)\n", + "_ = p.spines[\"right\"].set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "id": "c6a697a5-5709-4a69-b644-62779b4f8bc5", + "metadata": {}, + "source": [ + "Now, view the first few records of the context data." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "79424785-129d-4007-84a5-041b6d38457d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDclassoutcome_timestampdurationcredit_amountinstallment_commitmentchecking_statusresidence_sinceageexisting_creditsnum_dependentshousing
18473good2023-12-16 03:29:12+00:00612384no checking43612own
764894good2023-11-15 23:19:35+00:001811694no checking32921own
504318good2023-11-23 13:03:53+00:00127014no checking23221own
454340good2023-12-26 17:59:37+00:0024574320<=X<20042421for free
453605good2023-12-18 11:27:02+00:002428284<042211own
\n", + "
" + ], + "text/plain": [ + " ID class outcome_timestamp duration credit_amount \\\n", + "18 473 good 2023-12-16 03:29:12+00:00 6 1238 \n", + "764 894 good 2023-11-15 23:19:35+00:00 18 1169 \n", + "504 318 good 2023-11-23 13:03:53+00:00 12 701 \n", + "454 340 good 2023-12-26 17:59:37+00:00 24 5743 \n", + "453 605 good 2023-12-18 11:27:02+00:00 24 2828 \n", + "\n", + " installment_commitment checking_status residence_since age \\\n", + "18 4 no checking 4 36 \n", + "764 4 no checking 3 29 \n", + "504 4 no checking 2 32 \n", + "454 2 0<=X<200 4 24 \n", + "453 4 <0 4 22 \n", + "\n", + " existing_credits num_dependents housing \n", + "18 1 2 own \n", + "764 2 1 own \n", + "504 2 1 own \n", + "454 2 1 for free \n", + "453 1 1 own " + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# View first records in training data\n", + "train.head()" + ] + }, + { + "cell_type": "markdown", + "id": "fd52f5bc-aa0f-48db-b356-c52aa7ce3724", + "metadata": {}, + "source": [ + "### Feature Engineering" + ] + }, + { + "cell_type": "markdown", + "id": "3e5b5c02-ad4d-400e-bdac-bfdf2799f575", + "metadata": {}, + "source": [ + "Once data columns have been prepared so that they can be used to train an AI model, it is common to refer to them as \"features\". The process of preparing features is referred to as \"feature engineering\". \n", + "\n", + "Below, we will train a random forest model. Random forests are relatively robust to non-standardized, non-normalized data, making it easier for us to getting started. As such, the numerical columns are ready for a simple baseline training. \n", + "\n", + "We have pulled two categorical columns, wich we will need to engineer into numerical features." 
+ ] + }, + { + "cell_type": "markdown", + "id": "45a6fb27-140c-4f5a-b464-1f5e5d81d086", + "metadata": {}, + "source": [ + "The `checking_status` column tells us roughly how much money the applicant has in their checking account, while the `housing` column shows the applicant's housing status. We presume that more money in checking correlates inversely with credit risk, while owing vs. renting, vs. living for free correlates directly with credit risk. Hence, converting these to ordinal features makes sense. Of course, in a real study we would want to quantitatively verify these presumptions." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "9e374096-b02d-4cbb-8fca-dcc451c90c50", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "checking_status\n", + "no checking 0.39375\n", + "0<=X<200 0.27500\n", + "<0 0.26125\n", + ">=200 0.07000\n", + "Name: proportion, dtype: float64" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Inspect the `checking_status` column distibution\n", + "train.checking_status.value_counts(normalize=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "0144b525-244b-4526-8e4b-d393cb174d06", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "housing\n", + "own 0.7225\n", + "rent 0.1675\n", + "for free 0.1100\n", + "Name: proportion, dtype: float64" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Inspect the `housing` column distribution\n", + "train.housing.value_counts(normalize=True)" + ] + }, + { + "cell_type": "markdown", + "id": "2cb340b4-7d21-4810-8be2-1633da2e4396", + "metadata": {}, + "source": [ + "We define a tranformer that can be used to convert `checking_status` and `housing` to ordinal variables. The transformer will also drop the non-feature columns (`class`, `ID`, and `application_timestamp`) from the feature data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "27796e23-c12e-4e51-8fb4-090b26aff2ef", + "metadata": {}, + "outputs": [], + "source": [ + "# Feature lists\n", + "cat_features = [\"checking_status\", \"housing\"]\n", + "num_features = [\n", + " \"duration\", \"credit_amount\", \"installment_commitment\",\n", + " \"residence_since\", \"age\", \"existing_credits\", \"num_dependents\"\n", + "]\n", + "\n", + "# Ordinal encoder for cat_features\n", + "# (We use a ColumnTransformer to passthrough numerical feature columns)\n", + "col_transform = ColumnTransformer([\n", + " (\"cat_features\", OrdinalEncoder(), cat_features),\n", + " (\"num_features\", \"passthrough\", num_features),\n", + " ],\n", + " remainder=\"drop\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "318429b9-e008-4cc7-8108-779934f9ac2f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
checking_statushousingdurationcredit_amountinstallment_commitmentresidence_sinceageexisting_creditsnum_dependents
183.01.06.01238.04.04.036.01.02.0
7643.01.018.01169.04.03.029.02.01.0
5043.01.012.0701.04.02.032.02.01.0
4540.00.024.05743.02.04.024.02.01.0
4531.01.024.02828.04.04.022.01.01.0
\n", + "
" + ], + "text/plain": [ + " checking_status housing duration credit_amount \\\n", + "18 3.0 1.0 6.0 1238.0 \n", + "764 3.0 1.0 18.0 1169.0 \n", + "504 3.0 1.0 12.0 701.0 \n", + "454 0.0 0.0 24.0 5743.0 \n", + "453 1.0 1.0 24.0 2828.0 \n", + "\n", + " installment_commitment residence_since age existing_credits \\\n", + "18 4.0 4.0 36.0 1.0 \n", + "764 4.0 3.0 29.0 2.0 \n", + "504 4.0 2.0 32.0 2.0 \n", + "454 2.0 4.0 24.0 2.0 \n", + "453 4.0 4.0 22.0 1.0 \n", + "\n", + " num_dependents \n", + "18 2.0 \n", + "764 1.0 \n", + "504 1.0 \n", + "454 1.0 \n", + "453 1.0 " + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Check the tranform outputs features as expected\n", + "# (Note: transform output is an array, so we convert it\n", + "# back to dataframe for inspection)\n", + "pd.DataFrame(\n", + " index=train.index,\n", + " columns=cat_features + num_features,\n", + " data= col_transform.fit_transform(train)\n", + ").head()" + ] + }, + { + "cell_type": "markdown", + "id": "a3785c93-8830-4fa2-bb9d-31b6e8fecb01", + "metadata": {}, + "source": [ + "Finally, let's separate out the labels, and engineer them from categorical (\"good\" | \"bad\") to float (1.0 | 0.0). We do this for both the training and validation data." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "30ebff90-a193-43a2-86fb-cf09e7d03777", + "metadata": {}, + "outputs": [], + "source": [ + "# Make \"class\" target variable numeric\n", + "train_y = (train[\"class\"] == \"good\").astype(float)\n", + "validate_y = (validate[\"class\"] == \"good\").astype(float)" + ] + }, + { + "cell_type": "markdown", + "id": "b052f6b2-2a34-441d-8a5f-2aad4e4db022", + "metadata": {}, + "source": [ + "### Train the Model" + ] + }, + { + "cell_type": "markdown", + "id": "c4f14590-31f4-4680-b1a1-75755a78513e", + "metadata": {}, + "source": [ + "Now that the features are prepared, we can train (fit) our baseline model on the feature data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "0ff48f34-dbb6-4221-aefc-3c9b3f9da3e3", + "metadata": {}, + "outputs": [], + "source": [ + "# Specify the model\n", + "rf_model = RandomForestClassifier(\n", + " n_estimators=400,\n", + " criterion=\"entropy\",\n", + " max_depth=4,\n", + " min_samples_leaf=10,\n", + " class_weight={0:5, 1:1},\n", + " random_state=SEED\n", + ")\n", + "\n", + "# Package transform and model in pipeline\n", + "model = Pipeline([(\"transform\", col_transform), (\"rf_model\", rf_model)])" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "1d6ef38a-23b0-4056-a108-960495521164", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
Pipeline(steps=[('transform',\n",
+       "                 ColumnTransformer(transformers=[('cat_features',\n",
+       "                                                  OrdinalEncoder(),\n",
+       "                                                  ['checking_status',\n",
+       "                                                   'housing']),\n",
+       "                                                 ('num_features', 'passthrough',\n",
+       "                                                  ['duration', 'credit_amount',\n",
+       "                                                   'installment_commitment',\n",
+       "                                                   'residence_since', 'age',\n",
+       "                                                   'existing_credits',\n",
+       "                                                   'num_dependents'])])),\n",
+       "                ('rf_model',\n",
+       "                 RandomForestClassifier(class_weight={0: 5, 1: 1},\n",
+       "                                        criterion='entropy', max_depth=4,\n",
+       "                                        min_samples_leaf=10, n_estimators=400,\n",
+       "                                        random_state=142))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
" + ], + "text/plain": [ + "Pipeline(steps=[('transform',\n", + " ColumnTransformer(transformers=[('cat_features',\n", + " OrdinalEncoder(),\n", + " ['checking_status',\n", + " 'housing']),\n", + " ('num_features', 'passthrough',\n", + " ['duration', 'credit_amount',\n", + " 'installment_commitment',\n", + " 'residence_since', 'age',\n", + " 'existing_credits',\n", + " 'num_dependents'])])),\n", + " ('rf_model',\n", + " RandomForestClassifier(class_weight={0: 5, 1: 1},\n", + " criterion='entropy', max_depth=4,\n", + " min_samples_leaf=10, n_estimators=400,\n", + " random_state=142))])" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Fit the model\n", + "model.fit(train, train_y)" + ] + }, + { + "cell_type": "markdown", + "id": "73c45c39-9d8e-4f76-aca5-9f0c1568d263", + "metadata": {}, + "source": [ + "### Evaluate the Model" + ] + }, + { + "cell_type": "markdown", + "id": "ef58d432-80ba-428f-b59f-621a9e53b331", + "metadata": {}, + "source": [ + "Let's evaluate our baseline model performance. With credit risk, recall is going to be an important measure to look at. We compare the performance on the training data, with the performance on the validation data through a classification report." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "8c5472f6-2ddc-437d-8102-4d5bd2c9f39c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " precision recall f1-score support\n", + "\n", + " 0.0 0.42 0.92 0.58 232\n", + " 1.0 0.94 0.49 0.64 568\n", + "\n", + " accuracy 0.61 800\n", + " macro avg 0.68 0.70 0.61 800\n", + "weighted avg 0.79 0.61 0.63 800\n", + "\n" + ] + } + ], + "source": [ + "# Evaluate training set performance\n", + "train_preds = model.predict(train)\n", + "print(classification_report(train_y, train_preds))" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c296bbd3-603e-4615-abbe-2689ebcf5d8c", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " precision recall f1-score support\n", + "\n", + " 0.0 0.46 0.87 0.61 68\n", + " 1.0 0.88 0.48 0.62 132\n", + "\n", + " accuracy 0.61 200\n", + " macro avg 0.67 0.68 0.61 200\n", + "weighted avg 0.74 0.61 0.62 200\n", + "\n" + ] + } + ], + "source": [ + "# Evaluate validation data performance\n", + "print(classification_report(validate_y, model.predict(validate)))" + ] + }, + { + "cell_type": "markdown", + "id": "d57ffbdc-f0b3-4fb6-9575-5acd983082cf", + "metadata": {}, + "source": [ + "The recall on the validation set for bad loans (0 class) is 0.87, meaning that the model correctly identified close to 90% of the bad loans. However, the precision of 0.46 tells us that the model is also classifying many loans that were actually good as bad. Precision and recall are technical metrics. In order to truly assess the models value, we would need feedback from the business side on the impact of misclassifications (for both good and bad loans).\n", + "\n", + "The difference in performance on the training vs. validation data, tells us that the model is slightly overfitting the data. Remember that this is just a quick baseline model. 
To improve further, we could do things like:\n", + "- gather more data\n", + "- engineer features\n", + "- experiment with hyperparameter settings\n", + "- experiment with other model types\n", + "\n", + "In fact, this is just a start. Creating AI models that meet business needs often requires a lot of guided experimentation." + ] + }, + { + "cell_type": "markdown", + "id": "0378d21a-d6db-42f9-851a-ce71f68c6802", + "metadata": {}, + "source": [ + "### Save the Model" + ] + }, + { + "cell_type": "markdown", + "id": "4450a328-f00c-4579-8e08-b2ebe5046961", + "metadata": {}, + "source": [ + "The last thing we do is save our trained model, so that we can pick it up later in the serving environment." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "da7a7906-d54f-4f2d-9803-6c82c86b28ad", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['rf_model.pkl']" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Save the model to a pickle file\n", + "joblib.dump(model, \"rf_model.pkl\")" + ] + }, + { + "cell_type": "markdown", + "id": "299588b8-ab67-4155-97a9-770e8e4a7476", + "metadata": {}, + "source": [ + "In the next notebook, [04_Credit_Risk_Model_Serving.ipynb](04_Credit_Risk_Model_Serving.ipynb), we will load the trained model and request predictions, with input features provided by the Feast online feature server." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/credit-risk-end-to-end/04_Credit_Risk_Model_Serving.ipynb b/examples/credit-risk-end-to-end/04_Credit_Risk_Model_Serving.ipynb new file mode 100644 index 00000000000..f263dd6cd7b --- /dev/null +++ b/examples/credit-risk-end-to-end/04_Credit_Risk_Model_Serving.ipynb @@ -0,0 +1,697 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9c870dcb-c66d-454d-a3fa-5f9a723bf8af", + "metadata": {}, + "source": [ + "
" + ] + }, + { + "cell_type": "markdown", + "id": "339ab741-ac90-4763-9971-3b274f6a90b4", + "metadata": {}, + "source": [ + "# Credit Risk Model Serving" + ] + }, + { + "cell_type": "markdown", + "id": "31d29794-4c33-4bc1-9bb4-e238c59f882d", + "metadata": {}, + "source": [ + "### Introduction" + ] + }, + { + "cell_type": "markdown", + "id": "d6553fe7-5427-4ecc-b638-615b47acf1a8", + "metadata": {}, + "source": [ + "Model serving is an exciting part of AI/ML. All of our previous work was building to this phase where we can actually serve loan predictions. \n", + "\n", + "So what role does Feast play in model serving? We've already seen that Feast can \"materialize\" data from the training offline store to the serving online store. This comes in handy because many models need contextual features at inference time. \n", + "\n", + "With this example, we can imagine a scenario something like this:\n", + "1. A bank customer submits a loan application on a website. \n", + "2. The website backend requests features, supplying the customer's ID as input.\n", + "3. The backend retrieves feature data for the ID in question.\n", + "4. The backend submits the feature data to the model to obtain a prediction.\n", + "5. The backend uses the prediction to make a decision.\n", + "6. The response is recorded and made available to the user.\n", + "\n", + "With online requests like this, time and resource usage often matter a lot. Feast facilitates quickly retrieving the correct feature data.\n", + "\n", + "In real-life, some of the contextual feature data points could be requested from the user, while others are retrieved from data sources. While outside the scope of this example, Feast does facilitate retrieving request data, and joining it with feature data. (See [Request Source](https://rtd.feast.dev/en/master/#request-source)).\n", + "\n", + "In this notebook, we request feature data from the online store for a small batch of users. 
We then get outcome predictions from our trained model. This notebook is a continuation of the work done in the previous notebooks; it comes as the step after [03_Credit_Risk_Model_Training.ipynb](03_Credit_Risk_Model_Training.ipynb)." + ] + }, + { + "cell_type": "markdown", + "id": "53818109-c357-435f-8a8b-2a62982fa9a8", + "metadata": {}, + "source": [ + "### Setup" + ] + }, + { + "cell_type": "markdown", + "id": "92b5ab1b-186d-4b76-aac7-9b5110f8673e", + "metadata": {}, + "source": [ + "*The following code assumes that you have read the example README.md file, and that you have setup an environment where the code can be run. Please make sure you have addressed the prerequisite needs.*" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "378189ed-e967-4b2b-b591-aab980a685b3", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import os\n", + "import joblib\n", + "import json\n", + "import requests\n", + "import warnings\n", + "import pandas as pd\n", + "\n", + "from feast import FeatureStore" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "ea90edb2-16f0-4d40-a280-4e6ea79ea5be", + "metadata": {}, + "outputs": [], + "source": [ + "# ingnore warnings\n", + "warnings.filterwarnings(action=\"ignore\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "55f8ed91-7c13-44f7-a294-b6cacd43f8db", + "metadata": {}, + "outputs": [], + "source": [ + "# Load the model\n", + "model = joblib.load(\"rf_model.pkl\")" + ] + }, + { + "cell_type": "markdown", + "id": "3093e1b6-66d9-4936-b197-d853631914db", + "metadata": {}, + "source": [ + "### Query Feast Online Server for Feature Data" + ] + }, + { + "cell_type": "markdown", + "id": "2b5bbc4a-e2d3-4b7b-8309-434ff3b3e2cf", + "metadata": {}, + "source": [ + "Here, we show two different ways to retrieve data from the online feature server. 
The first is using the Python `requests` library, and the second is using the Feast Python SDK.\n", + "\n", + "We can use the Python requests library to request feature data from the online feature server (that we deployed in notebook [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb)). The request takes the form of an HTTP POST command sent to the server endpoint (`url`). We request the data we need by supplying the entity and feature information in the data payload. We also need to specify an `application/json` content type in the request header." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "c6fd4f1a-917b-4a98-9bf6-101b4a074b64", + "metadata": {}, + "outputs": [], + "source": [ + "# ID examples\n", + "ids = [18, 764, 504, 454, 453, 0, 1, 2, 3, 4, 5, 6, 7, 8]\n", + "\n", + "# Submit get_online_features request to Feast online store server\n", + "response = requests.post(\n", + " url=\"http://localhost:6566/get-online-features\",\n", + " headers = {'Content-Type': 'application/json'},\n", + " data=json.dumps({\n", + " \"entities\": {\"ID\": ids},\n", + " \"features\": [\n", + " \"data_a:duration\",\n", + " \"data_a:credit_amount\",\n", + " \"data_a:installment_commitment\",\n", + " \"data_a:checking_status\",\n", + " \"data_b:residence_since\",\n", + " \"data_b:age\",\n", + " \"data_b:existing_credits\",\n", + " \"data_b:num_dependents\",\n", + " \"data_b:housing\"\n", + " ]\n", + " })\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "8e616a52-c18c-44a9-9e63-3aba071d7e79", + "metadata": {}, + "source": [ + "The response is returned as JSON, with feature values for each of the IDs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cf8948b7-4ed7-4c45-8acf-462331d9e4d2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'{\"metadata\":{\"feature_names\":[\"ID\",\"checking_status\",\"duration\",\"installment_commitment\",\"credit_amount\",\"residence_since\",\"num_dependents\",\"age\",\"housing\",\"existing_credits\"]},\"results\":[{\"values\":[18,764,504,454,453,0,1,2,3,4,5,6,7,8],\"statuses\":[\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\"],\"event_timestamps\":[\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\",\"1970-01-01T00:00:00Z\"]},{\"values\":[\"0<=X<200\",\"no checking\",\"<0\",\"<0\",\"no checking\",\"<0\",\"0<=X<200\",\"no checking\",\"<0\",\"<0\",\"no checking\",\"no checking\",\"0<=X<200\",\"no checking\"],\"statuses\":[\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",\"PRESENT\",'" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Show first 1000 characters of response\n", + "response.text[:1000]" + ] + }, + { + "cell_type": "markdown", + "id": "c719f702-578a-4f35-b8ff-e41707cda23e", + "metadata": {}, + "source": [ + "As the response data comes in JSON format, there is a little formatting required to organize the data into a dataframe with one record per row (and features as columns)." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "b992063d-8d83-4bf7-8153-f690b0410359", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDchecking_statusdurationinstallment_commitmentcredit_amountresidence_sincenum_dependentsagehousingexisting_credits
0180<=X<20024.04.012579.02.01.044.0for free1.0
1764no checking24.04.02463.03.01.027.0own2.0
2504<024.04.01207.04.01.024.0rent1.0
\n", + "
" + ], + "text/plain": [ + " ID checking_status duration installment_commitment credit_amount \\\n", + "0 18 0<=X<200 24.0 4.0 12579.0 \n", + "1 764 no checking 24.0 4.0 2463.0 \n", + "2 504 <0 24.0 4.0 1207.0 \n", + "\n", + " residence_since num_dependents age housing existing_credits \n", + "0 2.0 1.0 44.0 for free 1.0 \n", + "1 3.0 1.0 27.0 own 2.0 \n", + "2 4.0 1.0 24.0 rent 1.0 " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Inspect the response\n", + "resp_data = json.loads(response.text)\n", + "\n", + "# Transform JSON into dataframe\n", + "records = pd.DataFrame(\n", + " columns=resp_data[\"metadata\"][\"feature_names\"], \n", + " data=[[r[\"values\"][i] for r in resp_data[\"results\"]] for i in range(len(ids))]\n", + ")\n", + "records.head(3)" + ] + }, + { + "cell_type": "markdown", + "id": "6db9b8ac-146e-40d3-b35a-cf4f4b6bbc8a", + "metadata": {}, + "source": [ + "Now, let's see how we can do the same with the Feast Python SDK. Note that we instantiate our `FeatureStore` object with the configuration that we set up in [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb), by pointing to the `./Feature_Store` directory." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "765dc62b-e1e7-45fe-88b4-cc0235519ff8", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:_list_feature_views will make breaking changes. Please use _list_batch_feature_views instead. 
_list_feature_views will behave like _list_all_feature_views in the future.\n", + "WARNING:root:Cannot use sqlite_vec for vector search\n" + ] + } + ], + "source": [ + "# Instantiate FeatureStore object\n", + "store = FeatureStore(repo_path=\"./Feature_Store\")\n", + "\n", + "# Retrieve features\n", + "records = store.get_online_features(\n", + " entity_rows=[{\"ID\":v} for v in ids],\n", + " features=[\n", + " \"data_a:duration\",\n", + " \"data_a:credit_amount\",\n", + " \"data_a:installment_commitment\",\n", + " \"data_a:checking_status\",\n", + " \"data_b:residence_since\",\n", + " \"data_b:age\",\n", + " \"data_b:existing_credits\",\n", + " \"data_b:num_dependents\",\n", + " \"data_b:housing\" \n", + " ]\n", + ").to_df()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1d214e55-df0b-460d-936c-8951f7365a93", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDcredit_amountinstallment_commitmentchecking_statusdurationnum_dependentshousingageresidence_sinceexisting_credits
01812579.04.00<=X<20024.01.0for free44.02.01.0
17642463.04.0no checking24.01.0own27.03.02.0
25041207.04.0<024.01.0rent24.04.01.0
\n", + "
" + ], + "text/plain": [ + " ID credit_amount installment_commitment checking_status duration \\\n", + "0 18 12579.0 4.0 0<=X<200 24.0 \n", + "1 764 2463.0 4.0 no checking 24.0 \n", + "2 504 1207.0 4.0 <0 24.0 \n", + "\n", + " num_dependents housing age residence_since existing_credits \n", + "0 1.0 for free 44.0 2.0 1.0 \n", + "1 1.0 own 27.0 3.0 2.0 \n", + "2 1.0 rent 24.0 4.0 1.0 " + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "records.head(3)" + ] + }, + { + "cell_type": "markdown", + "id": "fd828758-6c57-4f9e-bbda-3983b6579da2", + "metadata": {}, + "source": [ + "### Get Predictions from the Model" + ] + }, + { + "cell_type": "markdown", + "id": "f446d7ec-0dae-409a-82a2-c0d7016c2001", + "metadata": {}, + "source": [ + "Now we can request predictions from our trained model. \n", + "\n", + "For convenience, we output the predictions along with the implied loan designations. Remember that these are predictions on loan outcomes, given context data from the loan application process. Since we have access to the actual `class` outcomes, we display those as well to see how the model did.|" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "70203f7b-f1e5-46ba-8623-f10bf3a5abf8", + "metadata": {}, + "outputs": [], + "source": [ + "# Get predictions from the model\n", + "preds = model.predict(records)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "27001dde-8bdb-4de1-8c33-a76f030748e0", + "metadata": {}, + "outputs": [], + "source": [ + "# Load labels\n", + "labels = pd.read_parquet(\"Feature_Store/data/labels.parquet\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "ddc958e8-8ff8-49b1-ac10-fc965f3bf21c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDPredictionLoan_DesignationTrue_Value
18180.0badbad
7647641.0goodgood
5045040.0badbad
4544540.0badbad
4534531.0goodgood
001.0goodgood
110.0badbad
221.0goodgood
330.0badgood
440.0badbad
551.0goodgood
661.0goodgood
770.0badgood
881.0goodgood
\n", + "
" + ], + "text/plain": [ + " ID Prediction Loan_Designation True_Value\n", + "18 18 0.0 bad bad\n", + "764 764 1.0 good good\n", + "504 504 0.0 bad bad\n", + "454 454 0.0 bad bad\n", + "453 453 1.0 good good\n", + "0 0 1.0 good good\n", + "1 1 0.0 bad bad\n", + "2 2 1.0 good good\n", + "3 3 0.0 bad good\n", + "4 4 0.0 bad bad\n", + "5 5 1.0 good good\n", + "6 6 1.0 good good\n", + "7 7 0.0 bad good\n", + "8 8 1.0 good good" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Show preds\n", + "pd.DataFrame({\n", + " \"ID\": ids,\n", + " \"Prediction\": preds,\n", + " \"Loan_Designation\": [\"bad\" if i==0.0 else \"good\" for i in preds],\n", + " \"True_Value\": labels.loc[ids, \"class\"]\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "87cd592a-61fc-4553-b84a-941d1785910d", + "metadata": {}, + "source": [ + "It's important to remember that the model's predictions are like educated guesses based on learned patterns. The model will get some predictions right, and other wrong. With the example records above, it looks like the model did pretty good! An AI/ML team's task is generally to make the model's predictions as useful as possible in helping the organization make decisions (for example, on loan approvals).\n", + "\n", + "In this case, we have a baseline model. While not ready for production, this model has set a low bar by which other models can be measured. Teams can also use a model like this to help with early testing, and with proving out things like pipelines and infrastructure before more sophisticated models are available.\n", + "\n", + "We have used Feast to query the feature data in support of model serving. The next notebook, [05_Credit_Risk_Cleanup.ipynb](05_Credit_Risk_Cleanup.ipynb), cleans up resources created in this and previous notebooks." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/credit-risk-end-to-end/05_Credit_Risk_Cleanup.ipynb b/examples/credit-risk-end-to-end/05_Credit_Risk_Cleanup.ipynb new file mode 100644 index 00000000000..846748dc425 --- /dev/null +++ b/examples/credit-risk-end-to-end/05_Credit_Risk_Cleanup.ipynb @@ -0,0 +1,296 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "cf46ec61-7914-4677-b12b-a9e478e88d3f", + "metadata": {}, + "source": [ + "# Credit Risk Cleanup" + ] + }, + { + "cell_type": "markdown", + "id": "6ae8aaec-e01d-48d3-b768-98661ad1ec85", + "metadata": {}, + "source": [ + "Run this notebook if you are done experimenting with this demo, or if you wish to start again with a clean slate.\n", + "\n", + "**RUNNING THE FOLLOWING CODE WILL REMOVE FILES AND PROCESSES CREATED BY THE PREVIOUS EXAMPLE NOTEBOOKS.**\n", + "\n", + "The notebook progresses in reverse order of how the files and processes were added. 
(The reverse order makes it possible to partially revert changes by running cells up to a certain point.)" + ] + }, + { + "cell_type": "markdown", + "id": "6feaa771-4226-459f-b6dd-214024cb5c7c", + "metadata": {}, + "source": [ + "#### Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "20a39e94-920d-4108-aa6b-1e29d2224f71", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import os\n", + "import time\n", + "import psutil" + ] + }, + { + "cell_type": "markdown", + "id": "3f124260-a8b2-475d-9103-8d336c543fce", + "metadata": {}, + "source": [ + "#### Remove Trained Model File" + ] + }, + { + "cell_type": "markdown", + "id": "f7a05a2b-9a26-4722-a526-84da99fc0b29", + "metadata": {}, + "source": [ + "This removes the model that was created and saved in [03_Credit_Risk_Model_Training.ipynb](03_Credit_Risk_Model_Training.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a6b21063-ea43-4329-be0c-c1644c705db2", + "metadata": {}, + "outputs": [], + "source": [ + "# Remove the model file that was saved in model training.\n", + "model_path = \"./rf_model.pkl\"\n", + "os.remove(model_path)" + ] + }, + { + "cell_type": "markdown", + "id": "ed97c24a-8f25-4e77-9037-f9cf4ad68dfa", + "metadata": {}, + "source": [ + "#### Shutdown Servers" + ] + }, + { + "cell_type": "markdown", + "id": "2f825d10-c13d-4701-b102-e15ad1c0bd3b", + "metadata": {}, + "source": [ + "Shut down the servers that were launched in [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb); also remove the `server_proc.txt` that held the process PIDs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "66db4d46-a895-4041-ad87-ab0a77f13211", + "metadata": {}, + "outputs": [], + "source": [ + "# Load server process objects\n", + "server_pids = open(\"server_proc.txt\").readlines()\n", + "offline_server_proc = psutil.Process(int(server_pids[0].strip()))\n", + "online_server_proc = psutil.Process(int(server_pids[1].strip()))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "001fd472-2e28-499e-9eac-0a16ad8187a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Online server : psutil.Process(pid=44621, name='python3.11', status='running', started='14:19:05')\n", + "Online server is running: True\n", + "\n", + "Offline server PID: psutil.Process(pid=44594, name='python3.11', status='running', started='14:19:03')\n", + "Offline server is running: True\n" + ] + } + ], + "source": [ + "# Verify if servers are running\n", + "def verify_servers():\n", + " # online server\n", + " print(f\"Online server : {online_server_proc}\")\n", + " print(f\"Online server is running: {online_server_proc.is_running()}\", end='\\n\\n')\n", + " # offline server\n", + " print(f\"Offline server PID: {offline_server_proc}\")\n", + " print(f\"Offline server is running: {offline_server_proc.is_running()}\")\n", + " \n", + "verify_servers()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "68376350-790a-4e7e-9325-c7de4d22e54b", + "metadata": {}, + "outputs": [], + "source": [ + "# Terminate offline server\n", + "offline_server_proc.terminate()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "446b6bf9-aef2-4873-b477-8bf595a8eabf", + "metadata": {}, + "outputs": [], + "source": [ + "# Terminate online server (master and worker)\n", + "for child in online_server_proc.children(recursive=True):\n", + " child.terminate()\n", + "online_server_proc.terminate()\n", + "time.sleep(2)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 7, + "id": "774827f6-4dcd-495b-b5c5-186b97148619", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Online server : psutil.Process(pid=44621, name='python3.11', status='terminated', started='14:19:05')\n", + "Online server is running: False\n", + "\n", + "Offline server PID: psutil.Process(pid=44594, name='python3.11', status='terminated', started='14:19:03')\n", + "Offline server is running: False\n" + ] + } + ], + "source": [ + "# Verify termination\n", + "verify_servers()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "f8a155e4-23b3-4fb3-b868-02ba2e0a4a31", + "metadata": {}, + "outputs": [], + "source": [ + "# Remove server_proc.txt (file for keeping track of pids)\n", + "os.remove(\"server_proc.txt\")" + ] + }, + { + "cell_type": "markdown", + "id": "ed7d6f25-d255-4986-9cf2-9876f6c558cc", + "metadata": {}, + "source": [ + "#### Remove Feast Applied Configuration Files" + ] + }, + { + "cell_type": "markdown", + "id": "d73efe15-a1d9-459b-8142-835dc2bf1c9f", + "metadata": {}, + "source": [ + "Remove the registry and online store (SQLite) files created on`feast apply` created in [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "0f13a4ac-d2ad-462b-b65e-4266b7cb4922", + "metadata": {}, + "outputs": [], + "source": [ + "os.remove(\"Feature_Store/data/online_store.db\")\n", + "os.remove(\"Feature_Store/data/registry.db\")" + ] + }, + { + "cell_type": "markdown", + "id": "eb0494cd-0143-4f5f-b7d6-9675e1403d9f", + "metadata": {}, + "source": [ + "#### Remove Feast Configuration Files" + ] + }, + { + "cell_type": "markdown", + "id": "86c33ac7-9e1f-4798-9f14-77773a1c13bd", + "metadata": {}, + "source": [ + "Remove the configution and feature definition files created in [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a747043f-05fe-4b44-979d-9b30565074ee", + "metadata": {}, + "outputs": [], + "source": [ + "os.remove(\"Feature_Store/feature_store.yaml\")\n", + "os.remove(\"Feature_Store/feature_definitions.py\")" + ] + }, + { + "cell_type": "markdown", + "id": "81975a0f-7fd6-4ed3-91cf-812946df4713", + "metadata": {}, + "source": [ + "#### Remove Data Files" + ] + }, + { + "cell_type": "markdown", + "id": "8182dc1e-d5c1-4739-b7c7-0620e93c5b64", + "metadata": {}, + "source": [ + "Remove the data files created in [01_Credit_Risk_Data_Prep.ipynb](01_Credit_Risk_Data_Prep.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "4ddb4fb2-fea1-4b70-8978-732af9a1cd3f", + "metadata": {}, + "outputs": [], + "source": [ + "for f in [\"data_a.parquet\", \"data_b.parquet\", \"labels.parquet\"]:\n", + " os.remove(f\"Feature_Store/data/{f}\")\n", + "os.rmdir(\"Feature_Store/data\")\n", + "os.rmdir(\"Feature_Store\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/credit-risk-end-to-end/README.md b/examples/credit-risk-end-to-end/README.md new file mode 100644 index 00000000000..5f59c750784 --- /dev/null +++ b/examples/credit-risk-end-to-end/README.md @@ -0,0 +1,39 @@ + +![Feast_Logo](https://raw.githubusercontent.com/feast-dev/feast/master/docs/assets/feast_logo.png) + +# Feast Credit Risk Classification End-to-End Example + +This example starts with an [OpenML](https://openml.org) credit risk dataset, and walks through the steps of preparing the data, setting up feature store resources, and 
serving features; this is all done inside the paradigm of an ML workflow, with the goal of helping users understand how Feast fits in the progression from data preparation, to model training and model serving. + +The example is organized in five notebooks: +1. [01_Credit_Risk_Data_Prep.ipynb](01_Credit_Risk_Data_Prep.ipynb) +2. [02_Deploying_the_Feature_Store.ipynb](02_Deploying_the_Feature_Store.ipynb) +3. [03_Credit_Risk_Model_Training.ipynb](03_Credit_Risk_Model_Training.ipynb) +4. [04_Credit_Risk_Model_Serving.ipynb](04_Credit_Risk_Model_Serving.ipynb) +5. [05_Credit_Risk_Cleanup.ipynb](05_Credit_Risk_Cleanup.ipynb) + +Run the notebooks in order to progress through the example. See below for prerequisite setup steps. + +### Preparing your Environment +To run the example, install the Python dependencies. You may wish to do so inside a virtual environment. Open a command terminal, and run the following: + +``` +# create venv-example virtual environment +python -m venv venv-example +# activate environment +source venv-example/bin/activate +``` + +Install the Python dependencies: +``` +pip install -r requirements.txt +``` + +Note that this example was tested with Python 3.11, but it should also work with other similar versions. + +### Running the Notebooks +Once you have installed the Python dependencies, you can run the example notebooks. To run the notebooks locally, execute the following command in a terminal window: + +```jupyter notebook``` + +You should see a browser window open a page where you can navigate to the example notebook (.ipynb) files and open them. 
diff --git a/examples/credit-risk-end-to-end/requirements.txt b/examples/credit-risk-end-to-end/requirements.txt new file mode 100644 index 00000000000..8b9b1313e78 --- /dev/null +++ b/examples/credit-risk-end-to-end/requirements.txt @@ -0,0 +1,6 @@ +feast +jupyter==1.1.1 +scikit-learn==1.5.2 +pandas==2.2.3 +matplotlib==3.9.2 +seaborn==0.13.2 \ No newline at end of file diff --git a/examples/operator-postgres-tls-demo/.gitignore b/examples/operator-postgres-tls-demo/.gitignore new file mode 100644 index 00000000000..6eb45f3fbca --- /dev/null +++ b/examples/operator-postgres-tls-demo/.gitignore @@ -0,0 +1,4 @@ +postgres-tls-certs +values.yaml +.ipynb_checkpoints +*.tar.gz \ No newline at end of file diff --git a/examples/operator-postgres-tls-demo/01-Install-postgres-tls-using-helm.ipynb b/examples/operator-postgres-tls-demo/01-Install-postgres-tls-using-helm.ipynb new file mode 100644 index 00000000000..d385f3d8de1 --- /dev/null +++ b/examples/operator-postgres-tls-demo/01-Install-postgres-tls-using-helm.ipynb @@ -0,0 +1,557 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f16967ef", + "metadata": {}, + "source": [ + "# Deploy PostgreSQL with Helm in TLS Mode" + ] + }, + { + "cell_type": "markdown", + "id": "1247e2e7-706c-44a3-a45c-fba638e50f31", + "metadata": {}, + "source": [ + "### NOTE: This PostgreSQL setup guide is intended to demonstrate the capabilities of the Feast operator in configuring Feast with PostgreSQL in TLS mode. For ongoing assistance with Postgres setup, we recommend consulting the official Helm PostgreSQL documentation." 
+ ] + }, + { + "cell_type": "markdown", + "id": "cce2278a", + "metadata": {}, + "source": [ + "## Step 1: Install Prerequisites" + ] + }, + { + "cell_type": "markdown", + "id": "3e4102d8", + "metadata": {}, + "source": [ + "Before starting, ensure you have the following installed:\n", + "- `kubectl` (Kubernetes CLI)\n", + "- `helm` (Helm CLI)\n", + "- A Kubernetes cluster (e.g., Minikube, GKE, EKS, or AKS)" + ] + }, + { + "cell_type": "markdown", + "id": "44b611ba-097e-4777-b77b-739116e7e4d6", + "metadata": {}, + "source": [ + "**Note:** When deploying PostgreSQL and Feast on a Kubernetes cluster, it's important to ensure that your cluster has sufficient resources to support both applications." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e2b40efc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Client Version: v1.31.2\n", + "Kustomize Version: v5.4.2\n", + "version.BuildInfo{Version:\"v3.17.0\", GitCommit:\"301108edc7ac2a8ba79e4ebf5701b0b6ce6a31e4\", GitTreeState:\"clean\", GoVersion:\"go1.23.4\"}\n" + ] + } + ], + "source": [ + "# Verify kubectl and helm are installed\n", + "!kubectl version --client\n", + "!helm version" + ] + }, + { + "cell_type": "markdown", + "id": "4b72fabe", + "metadata": {}, + "source": [ + "## Step 2: Add the Bitnami Helm Repository" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f439691e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\"bitnami\" already exists with the same configuration, skipping\n", + "Hang tight while we grab the latest from your chart repositories...\n", + "...Successfully got an update from the \"bitnami\" chart repository\n", + "Update Complete. 
⎈Happy Helming!⎈\n" + ] + } + ], + "source": [ + "# Add the Bitnami Helm repository\n", + "!helm repo add bitnami https://charts.bitnami.com/bitnami\n", + "!helm repo update" + ] + }, + { + "cell_type": "markdown", + "id": "6f51e5c8-41ba-417e-a2fc-78cf5951d9dc", + "metadata": {}, + "source": [ + "## Step 3: Create the Kubernetes feast namespace" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d114872a-7a43-4eca-8748-6dc7346dc176", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast created\n", + "Context \"kind-kind\" modified.\n" + ] + } + ], + "source": [ + "!kubectl create ns feast\n", + "!kubectl config set-context --current --namespace feast" + ] + }, + { + "cell_type": "markdown", + "id": "41f4e8db", + "metadata": {}, + "source": [ + "## Step 4: Generate Self-Signed TLS Certificates" + ] + }, + { + "cell_type": "markdown", + "id": "c34957e4-dd7f-49c1-986c-eefe74dd7e22", + "metadata": {}, + "source": [ + "**Note**: \n", + "- Self-signed certificates are used only for demo purposes; consider using a managed certificate service (e.g., Let's Encrypt) instead of self-signed certificates.\n", + "- Replace the `CN` values in the certificate generation step with your actual domain names." + ] + }, + { + "cell_type": "markdown", + "id": "500f9010-6329-4868-83d5-9c063d5890f5", + "metadata": {}, + "source": [ + "Delete the directory of existing certificates if you are not running this demo for the first time." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bdc71e19-0fcc-4a1f-ba94-8b5e427e45d9", + "metadata": {}, + "outputs": [], + "source": [ + "# Delete the certificates directory if you have run this example before.\n", + "!rm -rf postgres-tls-certs" + ] + }, + { + "cell_type": "markdown", + "id": "91dc26c9-cfaa-46f5-8252-7ad463264236", + "metadata": {}, + "source": [ + "Generate the certificates by executing the scripts below. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "8e192410", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "..+.......+.........+...+.....+......+.......+...+.....+......+.+..+......+.+.....+...+.......+...+..+.+.....+.......+........+.......+......+...........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.+...+...+........+....+..+...+...+....+...+......+..+..........+..+...+...+...............+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+...........+......+..........+..+.+.....+....+......+.....................+...+...+..+...+.......+..+.........+.......+.....+....+........+.+..+.............+......+....................+.........+.+......+.....+.......+........+......................+......+..+...+....+...+...+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", + "..+...+......+.+.........+...+......+..+.......+.....+.+..+...+.+...+......+.....+.........+......+.+...........+....+..................+...+.........+...+.....+.+.....+...............+.+......+...+............+...+......+......+........+.+.....+.............+..+.+..+.+..............+...+...+....+............+...+.....+......+.+.....+.+...+..+...+...................+...........+....+..+.................................+..........+...........+......+.+...+..+...+.......+.....+.......+...........+.......+...+......+.....+..........+...+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", + "-----\n", + ".+....+......+..+....+...+.....+......+.+........+..........+.....+............+.+...+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+.+..............+...............+.+...........+.......+...+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*....+...............+............+.....+.+......+........+...+...+.+...+.....+......+.+..............+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", + 
"............+....+.....+.+...+........+..........+..............+.+..............+.........+.+...+...........+......+......+.......+........+...+.........+.+.....+.+.....+.+........+.+.....................+..+.............+........+......+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", + "-----\n", + "Certificate request self-signature ok\n", + "subject=CN = postgresql.feast.svc.cluster.local\n", + "..+....+...+.....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*....+.+..+.......+......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.+..+.+.....+.+...+..................+.....+...+...................+......+..+...+.+......+..+..........+..+..................+.+..+...+......+.+............+..+....+...........+..........+.....+...+......+.+...+...+..+......+.+...+...+.........+......+.....+..................+.+.....+....+..............+.+..............+.+......+....................+..........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", + "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.....+.........+...+..+.......+.....+.+..+.+......+....................+......+.............+......+...+..+...+.+..+...+....+.....+...+...+.........+......+.+.....+.+..+..........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*..+...+.+...........+....+.....+...................+..+.+..+......+............+..........+.........+...+..+...............+..........+.....+....+............+........+.+........+.+.....+.......+.....++\n", + "-----\n", + "Certificate request self-signature ok\n", + "subject=CN = admin\n" + ] + } + ], + "source": [ + "# Create a directory for certificates\n", + "!mkdir -p postgres-tls-certs\n", + "\n", + "# Generate a CA certificate\n", + "!openssl req -new -x509 -days 365 -nodes -out postgres-tls-certs/ca.crt -keyout postgres-tls-certs/ca.key -subj \"/CN=PostgreSQL CA\"\n", + "\n", + "# Generate a server 
certificate\n", + "!openssl req -new -nodes -out postgres-tls-certs/server.csr -keyout postgres-tls-certs/server.key -subj \"/CN=postgresql.feast.svc.cluster.local\"\n", + "!openssl x509 -req -in postgres-tls-certs/server.csr -days 365 -CA postgres-tls-certs/ca.crt -CAkey postgres-tls-certs/ca.key -CAcreateserial -out postgres-tls-certs/server.crt\n", + "\n", + "# Generate a client certificate\n", + "!openssl req -new -nodes -out postgres-tls-certs/client.csr -keyout postgres-tls-certs/client.key -subj \"/CN=admin\"\n", + "!openssl x509 -req -in postgres-tls-certs/client.csr -days 365 -CA postgres-tls-certs/ca.crt -CAkey postgres-tls-certs/ca.key -CAcreateserial -out postgres-tls-certs/client.crt" + ] + }, + { + "cell_type": "markdown", + "id": "7e39cb28", + "metadata": {}, + "source": [ + "## Step 5: Create Kubernetes Secrets for Certificates" + ] + }, + { + "cell_type": "markdown", + "id": "a4775780-3734-40ba-ae43-48f1e47b481a", + "metadata": {}, + "source": [ + "In this step, we will create **two Kubernetes secrets** that reference the certificates generated earlier step:\n", + "\n", + "- **`postgresql-server-certs`** \n", + " This secret contains the server certificates and will be used by the PostgreSQL server.\n", + "\n", + "- **`postgresql-client-certs`** \n", + " This secret contains the client certificates and will be used by the PostgreSQL client. In our case it will be feast application." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d728d0d5-2ba6-4d4d-b4be-62fb020530d4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret/postgresql-server-certs created\n", + "secret/postgresql-client-certs created\n" + ] + } + ], + "source": [ + "# Create a secret for the server certificates\n", + "!kubectl create secret generic postgresql-server-certs --from-file=ca.crt=./postgres-tls-certs/ca.crt --from-file=tls.crt=./postgres-tls-certs/server.crt --from-file=tls.key=./postgres-tls-certs/server.key\n", + "\n", + "# Create a secret for the client certificates\n", + "!kubectl create secret generic postgresql-client-certs --from-file=ca.crt=./postgres-tls-certs/ca.crt --from-file=tls.crt=./postgres-tls-certs/client.crt --from-file=tls.key=./postgres-tls-certs/client.key" + ] + }, + { + "cell_type": "markdown", + "id": "67d62692", + "metadata": {}, + "source": [ + "## Step 6: Deploy PostgreSQL with Helm" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e14cae77", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME: postgresql\n", + "LAST DEPLOYED: Tue Feb 25 08:12:21 2025\n", + "NAMESPACE: feast\n", + "STATUS: deployed\n", + "REVISION: 1\n", + "TEST SUITE: None\n", + "NOTES:\n", + "CHART NAME: postgresql\n", + "CHART VERSION: 16.4.9\n", + "APP VERSION: 17.3.0\n", + "\n", + "Did you know there are enterprise versions of the Bitnami catalog? For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. 
See https://www.arrow.com/globalecs/na/vendors/bitnami for more information.\n", + "\n", + "** Please be patient while the chart is being deployed **\n", + "\n", + "PostgreSQL can be accessed via port 5432 on the following DNS names from within your cluster:\n", + "\n", + " postgresql.feast.svc.cluster.local - Read/Write connection\n", + "\n", + "To get the password for \"postgres\" run:\n", + "\n", + " export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace feast postgresql -o jsonpath=\"{.data.postgres-password}\" | base64 -d)\n", + "\n", + "To get the password for \"admin\" run:\n", + "\n", + " export POSTGRES_PASSWORD=$(kubectl get secret --namespace feast postgresql -o jsonpath=\"{.data.password}\" | base64 -d)\n", + "\n", + "To connect to your database run the following command:\n", + "\n", + " kubectl run postgresql-client --rm --tty -i --restart='Never' --namespace feast --image docker.io/bitnami/postgresql:17.3.0-debian-12-r1 --env=\"PGPASSWORD=$POSTGRES_PASSWORD\" \\\n", + " --command -- psql --host postgresql -U admin -d feast -p 5432\n", + "\n", + " > NOTE: If you access the container using bash, make sure that you execute \"/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash\" in order to avoid the error \"psql: local user with ID 1001} does not exist\"\n", + "\n", + "To connect to your database from outside the cluster execute the following commands:\n", + "\n", + " kubectl port-forward --namespace feast svc/postgresql 5432:5432 &\n", + " PGPASSWORD=\"$POSTGRES_PASSWORD\" psql --host 127.0.0.1 -U admin -d feast -p 5432\n", + "\n", + "WARNING: The configured password will be ignored on new installation in case when previous PostgreSQL release was deleted through the helm command. In that case, old PVC will have an old password, and setting it through helm won't take effect. Deleting persistent volumes (PVs) will solve the issue.\n", + "\n", + "WARNING: There are \"resources\" sections in the chart not set. 
Using \"resourcesPreset\" is not recommended for production. For production installations, please set the following values according to your workload needs:\n", + " - primary.resources\n", + " - readReplicas.resources\n", + " - volumePermissions.resources\n", + "+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n" + ] + } + ], + "source": [ + "# Helm values for TLS configuration\n", + "helm_values = \"\"\"\n", + "tls:\n", + " enabled: true\n", + " certificatesSecret: \"postgresql-server-certs\"\n", + " certFilename: \"tls.crt\"\n", + " certKeyFilename: \"tls.key\"\n", + " certCAFilename: \"ca.crt\"\n", + "\n", + "volumePermissions:\n", + " enabled: true\n", + "\n", + "# Set fixed PostgreSQL credentials\n", + "\n", + "global:\n", + " postgresql:\n", + " auth:\n", + " username: admin\n", + " password: password\n", + " database: feast\n", + "\"\"\"\n", + "\n", + "# Write the values to a file\n", + "with open(\"values.yaml\", \"w\") as f:\n", + " f.write(helm_values)\n", + "\n", + "# Install PostgreSQL with Helm\n", + "!helm install postgresql bitnami/postgresql --version 16.4.9 -f values.yaml -n feast " + ] + }, + { + "cell_type": "markdown", + "id": "5be34ace", + "metadata": {}, + "source": [ + "## Step 7: Verify the postgres Deployment" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "132df785-762e-473a-90d2-5fdb66a59a97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pod/postgresql-0 condition met\n", + "\n", + "NAME READY STATUS RESTARTS AGE\n", + "postgresql-0 1/1 Running 0 14s\n", + "\n", + "Defaulted container \"postgresql\" out of: postgresql, init-chmod-data (init)\n", + "ssl = 'on'\n", + "ssl_ca_file = '/opt/bitnami/postgresql/certs/ca.crt'\n", + "ssl_cert_file = '/opt/bitnami/postgresql/certs/tls.crt'\n", + "#ssl_crl_file = ''\n", + "#ssl_crl_dir = ''\n", + "ssl_key_file = '/opt/bitnami/postgresql/certs/tls.key'\n", + "#ssl_ciphers = 
'HIGH:MEDIUM:+3DES:!aNULL'\t# allowed SSL ciphers\n", + "#ssl_prefer_server_ciphers = on\n", + "#ssl_ecdh_curve = 'prime256v1'\n", + "#ssl_min_protocol_version = 'TLSv1.2'\n", + "#ssl_max_protocol_version = ''\n", + "#ssl_dh_params_file = ''\n", + "#ssl_passphrase_command = ''\n", + "#ssl_passphrase_command_supports_reload = off\n", + "\n", + "Defaulted container \"postgresql\" out of: postgresql, init-chmod-data (init)\n", + " List of databases\n", + " Name | Owner | Encoding | Locale Provider | Collate | Ctype | Locale | ICU Rules | Access privileges \n", + "-----------+----------+----------+-----------------+-------------+-------------+--------+-----------+-----------------------\n", + " feast | admin | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | =Tc/admin +\n", + " | | | | | | | | admin=CTc/admin\n", + " postgres | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | \n", + " template0 | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | =c/postgres +\n", + " | | | | | | | | postgres=CTc/postgres\n", + " template1 | postgres | UTF8 | libc | en_US.UTF-8 | en_US.UTF-8 | | | =c/postgres +\n", + " | | | | | | | | postgres=CTc/postgres\n", + "(4 rows)\n", + "\n" + ] + } + ], + "source": [ + "# Wait for the status of the PostgreSQL pod to be in Ready status.\n", + "!kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=postgresql --timeout=60s\n", + "\n", + "# Insert an empty line in the output for verbocity.\n", + "print()\n", + "\n", + "# display the pod status.\n", + "!kubectl get pods -l app.kubernetes.io/name=postgresql\n", + "\n", + "# Insert an empty line in the output for verbocity.\n", + "print()\n", + "\n", + "# check if the ssl is on and the path to certificates is configured.\n", + "!kubectl exec postgresql-0 -- cat /opt/bitnami/postgresql/conf/postgresql.conf | grep ssl\n", + "\n", + "# Insert an empty line in the output for verbocity.\n", + "print()\n", + "\n", + "# Connect to PostgreSQL using TLS (non-interactive mode)\n", + 
"!kubectl exec postgresql-0 -- env PGPASSWORD=password psql -U admin -d feast -c '\\l'\n" + ] + }, + { + "cell_type": "markdown", + "id": "c921423a-81df-456e-9cca-f689070c44d2", + "metadata": {}, + "source": [ + "## Step 8: Port forwarding in the terminal for the connection testing using python" + ] + }, + { + "cell_type": "markdown", + "id": "d6a26bb4-e0e7-419e-9c91-f0d63db127bc", + "metadata": {}, + "source": [ + "**Note:** If you do not intend to test the PostgreSQL connection from outside the Kubernetes cluster, you can skip the remaining steps." + ] + }, + { + "cell_type": "markdown", + "id": "6fcad5e1-66d2-4353-aba7-3549ef21bc9f", + "metadata": {}, + "source": [ + "**Note:**\n", + "To test a connection to a PostgreSQL database outside of your Kubernetes cluster, you'll need to execute the following command in your system's terminal window. This is necessary because Jupyter Notebook does not support running commands in a separate thread." + ] + }, + { + "cell_type": "markdown", + "id": "88a4a7c1-51c4-4c5a-9472-5cace1c47a1c", + "metadata": {}, + "source": [ + "kubectl port-forward svc/postgresql 5432:5432" + ] + }, + { + "cell_type": "markdown", + "id": "a8777ca3-bf59-4f23-b7d0-60ae8c92d5a5", + "metadata": {}, + "source": [ + "## Step 9: Check the connection using Python sql alchemy" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5a523f9f-784f-493b-b69d-5a3cb1a830af", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "postgresql+psycopg://admin:password@localhost:5432/feast?sslmode=verify-ca&sslrootcert=postgres-tls-certs/ca.crt&sslcert=postgres-tls-certs/client.crt&sslkey=postgres-tls-certs/client.key\n", + "Connected successfully!\n" + ] + } + ], + "source": [ + "# Define database connection parameters\n", + "DB_USER = \"admin\"\n", + "DB_PASSWORD = \"password\"\n", + "DB_HOST = \"localhost\"\n", + "DB_PORT = \"5432\"\n", + "DB_NAME = \"feast\"\n", + "\n", + "# TLS Certificate Paths\n", + 
"SSL_CERT = \"postgres-tls-certs/client.crt\"\n", + "SSL_KEY = \"postgres-tls-certs/client.key\"\n", + "SSL_ROOT_CERT = \"postgres-tls-certs/ca.crt\"\n", + "\n", + "import os\n", + "os.environ[\"FEAST_CA_CERT_FILE_PATH\"] = \"postgres-tls-certs/ca.crt\"\n", + "\n", + "from sqlalchemy import create_engine\n", + "# Create SQLAlchemy connection string\n", + "DATABASE_URL = (\n", + " f\"postgresql+psycopg://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}?\"\n", + " f\"sslmode=verify-ca&sslrootcert={SSL_ROOT_CERT}&sslcert={SSL_CERT}&sslkey={SSL_KEY}\"\n", + ")\n", + "\n", + "print(DATABASE_URL)\n", + "\n", + "# Create SQLAlchemy engine\n", + "engine = create_engine(DATABASE_URL)\n", + "\n", + "# Test connection\n", + "try:\n", + " with engine.connect() as connection:\n", + " print(\"Connected successfully!\")\n", + "except Exception as e:\n", + " print(\"Connection failed: Make sure that port forwarding step is done in the terminal.\", e)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7503e47e-12f1-44dd-8a50-786d744bbf4c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/operator-postgres-tls-demo/02-Install-feast.ipynb b/examples/operator-postgres-tls-demo/02-Install-feast.ipynb new file mode 100644 index 00000000000..16948b3610c --- /dev/null +++ b/examples/operator-postgres-tls-demo/02-Install-feast.ipynb @@ -0,0 +1,458 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Install Feast on Kubernetes with the Feast Operator\n", + "## Objective\n", + 
"\n", + "Provide a reference implementation of a runbook to deploy a Feast environment on a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io/docs/user/quick-start) and the [Feast Operator](../../infra/feast-operator/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prerequisites\n", + "* Kubernetes Cluster\n", + "* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) Kubernetes CLI tool." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the Feast Operator" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast-operator-system created\n", + "customresourcedefinition.apiextensions.k8s.io/featurestores.feast.dev created\n", + "serviceaccount/feast-operator-controller-manager created\n", + "role.rbac.authorization.k8s.io/feast-operator-leader-election-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-editor-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-viewer-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-manager-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-auth-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-reader created\n", + "rolebinding.rbac.authorization.k8s.io/feast-operator-leader-election-rolebinding created\n", + "clusterrolebinding.rbac.authorization.k8s.io/feast-operator-manager-rolebinding created\n", + "clusterrolebinding.rbac.authorization.k8s.io/feast-operator-metrics-auth-rolebinding created\n", + "service/feast-operator-controller-manager-metrics-service created\n", + "deployment.apps/feast-operator-controller-manager created\n", + "deployment.apps/feast-operator-controller-manager condition met\n" + ] + } + ], + "source": [ + "## Use this install command from a release branch (e.g. 
'v0.46-branch')\n", + "!kubectl apply -f ../../infra/feast-operator/dist/install.yaml\n", + "\n", + "## OR, for the latest code/builds, use one the following commands from the 'master' branch\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:develop FS_IMG=quay.io/feastdev-ci/feature-server:develop\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:$(git rev-parse HEAD) FS_IMG=quay.io/feastdev-ci/feature-server:$(git rev-parse HEAD)\n", + "\n", + "!kubectl wait --for=condition=available --timeout=5m deployment/feast-operator-controller-manager -n feast-operator-system" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the Feast services via FeatureStore CR\n", + "Next, we'll use the running Feast Operator to install the feast services. Before doing that it is important to understand basic understanding of operator support of Volumes and volumeMounts and how to mount TLS certificates." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Mounting TLS Certificates with Volumes in Feast Operator \n", + "\n", + "The Feast operator supports **volumes** and **volumeMounts**, allowing you to mount TLS certificates onto a pod. This approach provides flexibility in how you mount these files, supporting different Kubernetes resources such as **Secrets, ConfigMaps,** and **Persistent Volumes (PVs).** \n", + "\n", + "#### Example: Mounting Certificates Using Kubernetes Secrets \n", + "\n", + "In this example, we demonstrate how to mount TLS certificates using **Kubernetes Secrets** that were created in a previous notebook. \n", + "\n", + "#### PostgreSQL Connection Parameters \n", + "\n", + "When connecting to PostgreSQL with TLS, some important parameters in the connection URL are: \n", + "\n", + "- **`sslrootcert`** – Specifies the path to the **CA certificate** file used to validate trusted certificates. 
\n", + "- **`sslcert`** – Provides the client certificate for **mutual TLS (mTLS) encryption**. \n", + "- **`sslkey`** – Specifies the private key for the client certificate. \n", + "\n", + "If mutual TLS authentication is not required, you can **omit** the `sslcert` and `sslkey` parameters. However, the `sslrootcert` parameter is still necessary for validating server certificates. \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " Note: Please deploy either option 1 or 2 only. Don't deploy both of them at the same time, to avoid conflicts in the later steps. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Option 1: Directly Setting the CA Certificate Path** \n", + "\n", + "In this approach, we specify the CA certificate path directly in the Feast PostgreSQL URL using the `sslrootcert` parameter. \n", + "\n", + "You can refer to the `v1alpha1_featurestore_postgres_db_volumes_tls.yaml` file for the complete configuration details. " + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret/postgres-secret created\n", + "secret/feast-data-stores created\n", + "featurestore.feast.dev/sample-db-ssl created\n" + ] + } + ], + "source": [ + "!kubectl apply -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_db_volumes_tls.yaml --namespace=feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Option 2: Using an Environment Variable for the CA Certificate** \n", + "\n", + "In this approach, you define the CA certificate path as an environment variable. You can refer to the `v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml` file for the complete configuration details. 
\n", + "\n", + "```bash\n", + "FEAST_CA_CERT_FILE_PATH=\n" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret/postgres-secret created\n", + "secret/feast-data-stores created\n", + "featurestore.feast.dev/sample-db-ssl created\n" + ] + } + ], + "source": [ + "!kubectl apply -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml --namespace=feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Validate the running FeatureStore deployment\n", + "Validate the deployment status." + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "deployment.apps/feast-sample-db-ssl condition met\n", + "NAME READY STATUS RESTARTS AGE\n", + "pod/feast-sample-db-ssl-86b47d54-hclb9 1/1 Running 0 27s\n", + "pod/postgresql-0 1/1 Running 0 13h\n", + "\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n", + "service/feast-sample-db-ssl-online ClusterIP 10.96.61.65 80/TCP 27s\n", + "service/postgresql ClusterIP 10.96.228.3 5432/TCP 13h\n", + "service/postgresql-hl ClusterIP None 5432/TCP 13h\n", + "\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\n", + "deployment.apps/feast-sample-db-ssl 1/1 1 1 27s\n", + "\n", + "NAME DESIRED CURRENT READY AGE\n", + "replicaset.apps/feast-sample-db-ssl-86b47d54 1 1 1 27s\n", + "\n", + "NAME READY AGE\n", + "statefulset.apps/postgresql 1/1 13h\n" + ] + } + ], + "source": [ + "!kubectl wait --for=condition=available --timeout=8m deployment/feast-sample-db-ssl -n feast\n", + "!kubectl get all" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Validate that the FeatureStore CR is in a `Ready` state." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME STATUS AGE\n", + "sample-db-ssl Ready 33s\n" + ] + } + ], + "source": [ + "!kubectl get feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Verify that the DB includes the expected tables." + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Defaulted container \"postgresql\" out of: postgresql, init-chmod-data (init)\n", + " List of relations\n", + " Schema | Name | Type | Owner \n", + "--------+------------------------------------------------------+-------+-------\n", + " public | data_sources | table | admin\n", + " public | entities | table | admin\n", + " public | feast_metadata | table | admin\n", + " public | feature_services | table | admin\n", + " public | feature_views | table | admin\n", + " public | managed_infra | table | admin\n", + " public | on_demand_feature_views | table | admin\n", + " public | permissions | table | admin\n", + " public | postgres_tls_sample_env_ca_driver_hourly_stats | table | admin\n", + " public | postgres_tls_sample_env_ca_driver_hourly_stats_fresh | table | admin\n", + " public | projects | table | admin\n", + " public | saved_datasets | table | admin\n", + " public | stream_feature_views | table | admin\n", + " public | validation_references | table | admin\n", + "(14 rows)\n", + "\n" + ] + } + ], + "source": [ + "!kubectl exec postgresql-0 -- env PGPASSWORD=password psql -U admin -d feast -c '\\dt'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Verify the client `feature_store.yaml` and create the sample feature store definitions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "project: postgres_tls_sample_env_ca\n", + "provider: local\n", + "offline_store:\n", + " host: ${POSTGRES_HOST}\n", + " type: postgres\n", + " port: 5432\n", + " database: ${POSTGRES_DB}\n", + " db_schema: public\n", + " password: ${POSTGRES_PASSWORD}\n", + " sslcert_path: /var/lib/postgresql/certs/tls.crt\n", + " sslkey_path: /var/lib/postgresql/certs/tls.key\n", + " sslmode: verify-full\n", + " sslrootcert_path: system\n", + " user: ${POSTGRES_USER}\n", + "online_store:\n", + " type: postgres\n", + " database: ${POSTGRES_DB}\n", + " db_schema: public\n", + " host: ${POSTGRES_HOST}\n", + " password: ${POSTGRES_PASSWORD}\n", + " port: 5432\n", + " sslcert_path: /var/lib/postgresql/certs/tls.crt\n", + " sslkey_path: /var/lib/postgresql/certs/tls.key\n", + " sslmode: verify-full\n", + " sslrootcert_path: system\n", + " user: ${POSTGRES_USER}\n", + "registry:\n", + " path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:5432/${POSTGRES_DB}?sslmode=verify-full&sslrootcert=system&sslcert=/var/lib/postgresql/certs/tls.crt&sslkey=/var/lib/postgresql/certs/tls.key\n", + " registry_type: sql\n", + " cache_ttl_seconds: 60\n", + " sqlalchemy_config_kwargs:\n", + " echo: false\n", + " pool_pre_ping: true\n", + "auth:\n", + " type: no_auth\n", + "entity_key_serialization_version: 3\n", + ": MADV_DONTNEED does not work (memset will be used instead)\n", + ": (This is the expected behaviour if you are running under QEMU)\n", + "/opt/app-root/src/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/feast-data/postgres_tls_sample_env_ca/feature_repo/example_repo.py:27: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'driver'.\n", + " driver = Entity(name=\"driver\", join_keys=[\"driver_id\"])\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'driver'.\n", + " entity = cls(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "Applying changes for project postgres_tls_sample_env_ca\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'driver'.\n", + " entity = cls(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/src/sdk/python/feast/feature_store.py:579: RuntimeWarning: On demand feature view is an experimental feature. This API is stable, but the functionality does not scale well for offline retrieval\n", + " warnings.warn(\n", + "Deploying infrastructure for driver_hourly_stats\n", + "Deploying infrastructure for driver_hourly_stats_fresh\n", + " Feast apply is completed. You can go to next step.\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-sample-db-ssl -c online -- cat feature_store.yaml\n", + "!kubectl exec deploy/feast-sample-db-ssl -c online -- feast apply\n", + "print(\" Feast apply is completed. You can go to next step.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "List the registered feast projects & feature views." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ": MADV_DONTNEED does not work (memset will be used instead)\n", + ": (This is the expected behaviour if you are running under QEMU)\n", + "/opt/app-root/src/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'driver'.\n", + " entity = cls(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "NAME DESCRIPTION TAGS OWNER\n", + "postgres_tls_sample {}\n", + "postgres_tls_sample_env_ca A project for driver statistics {}\n", + ": MADV_DONTNEED does not work (memset will be used instead)\n", + ": (This is the expected behaviour if you are running under QEMU)\n", + "/opt/app-root/src/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'driver'.\n", + " entity = cls(\n", + "/opt/app-root/src/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "NAME ENTITIES TYPE\n", + "driver_hourly_stats_fresh {'driver'} FeatureView\n", + "driver_hourly_stats {'driver'} FeatureView\n", + "transformed_conv_rate {'driver'} OnDemandFeatureView\n", + "transformed_conv_rate_fresh {'driver'} OnDemandFeatureView\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-sample-db-ssl -c online -- feast projects list\n", + "!kubectl exec deploy/feast-sample-db-ssl -c online -- feast feature-views list" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's verify the feast version." + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ": MADV_DONTNEED does not work (memset will be used instead)\n", + ": (This is the expected behaviour if you are running under QEMU)\n", + "/opt/app-root/src/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "Feast SDK Version: \"0.1.dev1+g6c92447.d20250213\"\n" + ] + } + ], + "source": [ + "!kubectl exec deployment/feast-sample-db-ssl -c online -- feast version" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-postgres-tls-demo/03-Uninstall.ipynb b/examples/operator-postgres-tls-demo/03-Uninstall.ipynb new file mode 100644 index 00000000000..007b8d7bc1a --- /dev/null +++ b/examples/operator-postgres-tls-demo/03-Uninstall.ipynb @@ -0,0 +1,134 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Uninstall the Operator and all Feast related objects" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret \"postgres-secret\" deleted\n", + "secret \"feast-data-stores\" deleted\n", + "featurestore.feast.dev \"sample-db-ssl\" deleted\n", + "Error from server (NotFound): error when deleting \"../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml\": secrets \"postgres-secret\" not found\n", + "Error from server (NotFound): error when deleting \"../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml\": secrets \"feast-data-stores\" not found\n", + "Error from server (NotFound): error when deleting \"../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml\": featurestores.feast.dev \"sample-db-ssl\" not found\n" 
+ ]
+ }
+ ],
+ "source": [
+ "# If you have chosen the option 1 example earlier.\n",
+ "!kubectl delete -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_db_volumes_tls.yaml\n",
+ "\n",
+ "# If you have chosen the option 2 example earlier.\n",
+ "!kubectl delete -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml\n",
+ "\n",
+ "#!kubectl delete -f ../../infra/feast-operator/dist/install.yaml"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Uninstall the Postgresql using helm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "release \"postgresql\" uninstalled\n",
+ "secret \"postgresql-server-certs\" deleted\n",
+ "secret \"postgresql-client-certs\" deleted\n",
+ "persistentvolumeclaim \"data-postgresql-0\" deleted\n",
+ "persistentvolume \"pvc-d0c961d9-7579-4e30-842a-b46812b71f74\" deleted\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Uninstall the Helm release\n",
+ "!helm uninstall postgresql\n",
+ "\n",
+ "# Delete the secrets\n",
+ "!kubectl delete secret postgresql-server-certs\n",
+ "!kubectl delete secret postgresql-client-certs\n",
+ "\n",
+ "# Remove the certificates directory\n",
+ "!rm -rf postgres-tls-certs\n",
+ "\n",
+ "# Remove PV and PVC for cleanup. Sometimes those are not deleted automatically and can cause issues.\n",
+ "# Delete all PVCs in the default namespace\n",
+ "!kubectl delete pvc --all\n",
+ "\n",
+ "# Delete all PVs\n",
+ "!kubectl delete pv --all"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Ensure everything has been removed, or is in the process of being terminated." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No resources found in feast namespace.\n" + ] + } + ], + "source": [ + "!kubectl get all" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-postgres-tls-demo/README.md b/examples/operator-postgres-tls-demo/README.md new file mode 100644 index 00000000000..70ae00da6ab --- /dev/null +++ b/examples/operator-postgres-tls-demo/README.md @@ -0,0 +1,50 @@ +# Installing Feast on Kubernetes with PostgreSQL TLS Demo using feast operator + +This example folder contains a series of Jupyter Notebooks that guide you through setting up [Feast](https://feast.dev/) on a Kubernetes cluster. + +In this demo, Feast connects to a PostgreSQL database running in TLS mode, ensuring secure communication between services. Additionally, the example demonstrates how feast application references TLS certificates using Kubernetes volumes and volume mounts. While the focus is on mounting TLS certificates, you can also mount any other resources supported by Kubernetes volumes. + +## Prerequisites + +- A running Kubernetes cluster with sufficient resources. +- [Helm](https://helm.sh/) installed and configured. +- The [Feast Operator](https://docs.feast.dev/) for managing Feast deployments. +- Jupyter Notebook or JupyterLab to run the provided notebooks. +- Basic familiarity with Kubernetes, Helm, and TLS concepts. 
+
+## Notebook Overview
+
+The following Jupyter Notebooks will walk you through the entire process:
+
+1. **[01-Install-postgres-tls-using-helm.ipynb](./01-Install-postgres-tls-using-helm.ipynb)**
+ Installs PostgreSQL in TLS mode using a Helm chart.
+
+2. **[02-Install-feast.ipynb](02-Install-feast.ipynb)**
+ Deploys Feast using the Feast Operator.
+
+3. **[03-Uninstall.ipynb](./03-Uninstall.ipynb)**
+ Uninstalls Feast, the Feast Operator, and the PostgreSQL deployments set up in this demo.
+
+## How to Run the Demo
+
+1. **Clone the Repository**
+
+ ```shell
+ git clone https://github.com/feast-dev/feast.git
+ cd feast/examples/operator-postgres-tls-demo
+ ```
+2. Start Jupyter Notebook or JupyterLab from the repository root:
+
+```shell
+jupyter notebook
+```
+3. Execute the Notebooks
+Run the notebooks in the order listed above. Each notebook contains step-by-step instructions and code to deploy, test, and eventually clean up the demo components.
+
+
+## Troubleshooting
+* **Cluster Resources:**
+Verify that your Kubernetes cluster has adequate resources before starting the demo.
+
+* **Logs & Diagnostics:**
+If you encounter issues, check the logs for the PostgreSQL and Feast pods. This can help identify problems related to TLS configurations or resource constraints. 
\ No newline at end of file diff --git a/examples/operator-quickstart/.gitignore b/examples/operator-quickstart/.gitignore new file mode 100644 index 00000000000..335ec9573de --- /dev/null +++ b/examples/operator-quickstart/.gitignore @@ -0,0 +1 @@ +*.tar.gz diff --git a/examples/operator-quickstart/01-Install.ipynb b/examples/operator-quickstart/01-Install.ipynb new file mode 100644 index 00000000000..7b974a721b3 --- /dev/null +++ b/examples/operator-quickstart/01-Install.ipynb @@ -0,0 +1,401 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Install Feast on Kubernetes with the Feast Operator\n", + "## Objective\n", + "\n", + "Provide a reference implementation of a runbook to deploy a Feast environment on a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io/docs/user/quick-start) and the [Feast Operator](../../infra/feast-operator/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prerequisites\n", + "* Kubernetes Cluster\n", + "* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) Kubernetes CLI tool." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install Prerequisites\n", + "\n", + "The following commands install and configure all the prerequisites on a macOS environment. You can find the\n", + "equivalent instructions on the official documentation pages:\n", + "* Install the `kubectl` cli.\n", + "* Install Kubernetes and Container runtime (e.g. 
[Colima](https://github.com/abiosoft/colima)).\n", + " * Alternatively, authenticate to an existing Kubernetes or OpenShift cluster.\n", + " \n", + "```bash\n", + "brew install colima kubectl\n", + "colima start -r containerd -k -m 3 -d 100 -c 2 --cpu-type max -a x86_64\n", + "colima list\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast created\n", + "Context \"colima\" modified.\n" + ] + } + ], + "source": [ + "!kubectl create ns feast\n", + "!kubectl config set-context --current --namespace feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Validate the cluster setup:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME STATUS AGE\n", + "feast Active 6s\n" + ] + } + ], + "source": [ + "!kubectl get ns feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deployment Architecture\n", + "The primary objective of this runbook is to guide the deployment of Feast services on a Kubernetes Kind cluster, using the `postgres` template to set up a basic feature store.\n", + "\n", + "In this notebook, we will deploy a distributed topology of Feast services, which includes:\n", + "\n", + "* `Registry Server`: Handles metadata storage for feature definitions.\n", + "* `Online Store Server`: Uses the `Registry Server` to query metadata and is responsible for low-latency serving of features.\n", + "* `Offline Store Server`: Uses the `Registry Server` to query metadata and provides access to batch data for historical feature retrieval.\n", + "\n", + "Each service is backed by a `PostgreSQL` database, which is also deployed within the same Kind cluster." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup Postgresql and Redis\n", + "Apply the included [postgres](postgres.yaml) & [redis](redis.yaml) deployments to run simple databases." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret/postgres-secret created\n", + "deployment.apps/postgres created\n", + "service/postgres created\n", + "deployment.apps/redis created\n", + "service/redis created\n", + "deployment.apps/redis condition met\n", + "deployment.apps/postgres condition met\n" + ] + } + ], + "source": [ + "!kubectl apply -f postgres.yaml -f redis.yaml\n", + "!kubectl wait --for=condition=available --timeout=5m deployment/redis\n", + "!kubectl wait --for=condition=available --timeout=5m deployment/postgres" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME READY STATUS RESTARTS AGE\n", + "pod/postgres-ff8d4cf48-c4znd 1/1 Running 0 2m17s\n", + "pod/redis-b4756b75d-r9nfb 1/1 Running 0 2m15s\n", + "\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n", + "service/postgres ClusterIP 10.43.151.129 5432/TCP 2m17s\n", + "service/redis ClusterIP 10.43.169.233 6379/TCP 2m15s\n", + "\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\n", + "deployment.apps/postgres 1/1 1 1 2m18s\n", + "deployment.apps/redis 1/1 1 1 2m16s\n", + "\n", + "NAME DESIRED CURRENT READY AGE\n", + "replicaset.apps/postgres-ff8d4cf48 1 1 1 2m18s\n", + "replicaset.apps/redis-b4756b75d 1 1 1 2m16s\n" + ] + } + ], + "source": [ + "!kubectl get all" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the Feast Operator" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast-operator-system 
created\n", + "customresourcedefinition.apiextensions.k8s.io/featurestores.feast.dev created\n", + "serviceaccount/feast-operator-controller-manager created\n", + "role.rbac.authorization.k8s.io/feast-operator-leader-election-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-editor-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-viewer-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-manager-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-auth-role created\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-reader created\n", + "rolebinding.rbac.authorization.k8s.io/feast-operator-leader-election-rolebinding created\n", + "clusterrolebinding.rbac.authorization.k8s.io/feast-operator-manager-rolebinding created\n", + "clusterrolebinding.rbac.authorization.k8s.io/feast-operator-metrics-auth-rolebinding created\n", + "service/feast-operator-controller-manager-metrics-service created\n", + "deployment.apps/feast-operator-controller-manager created\n", + "deployment.apps/feast-operator-controller-manager condition met\n" + ] + } + ], + "source": [ + "## Use this install command from a release branch (e.g. 
'v0.43-branch')\n", + "!kubectl apply -f ../../infra/feast-operator/dist/install.yaml\n", + "\n", + "## OR, for the latest code/builds, use one of the following commands from the 'master' branch\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:develop FS_IMG=quay.io/feastdev-ci/feature-server:develop\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:$(git rev-parse HEAD) FS_IMG=quay.io/feastdev-ci/feature-server:$(git rev-parse HEAD)\n", + "\n", + "!kubectl wait --for=condition=available --timeout=5m deployment/feast-operator-controller-manager -n feast-operator-system" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the Feast services via FeatureStore CR\n", + "Next, we'll use the running Feast Operator to install the feast services. Apply the included [reference deployment](feast.yaml) to install and configure Feast." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret/feast-data-stores created\n", + "featurestore.feast.dev/example created\n" + ] + } + ], + "source": [ + "!kubectl apply -f feast.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Validate the running FeatureStore deployment\n", + "Validate the deployment status." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME READY STATUS RESTARTS AGE\n", + "pod/feast-example-bbdc6cb6-rzkb4 0/1 Init:0/1 0 3s\n", + "pod/postgres-ff8d4cf48-c4znd 1/1 Running 0 4m49s\n", + "pod/redis-b4756b75d-r9nfb 1/1 Running 0 4m47s\n", + "\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\n", + "service/feast-example-online ClusterIP 10.43.143.216 80/TCP 4s\n", + "service/postgres ClusterIP 10.43.151.129 5432/TCP 4m49s\n", + "service/redis ClusterIP 10.43.169.233 6379/TCP 4m47s\n", + "\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\n", + "deployment.apps/feast-example 0/1 1 0 5s\n", + "deployment.apps/postgres 1/1 1 1 4m51s\n", + "deployment.apps/redis 1/1 1 1 4m49s\n", + "\n", + "NAME DESIRED CURRENT READY AGE\n", + "replicaset.apps/feast-example-bbdc6cb6 1 1 0 4s\n", + "replicaset.apps/postgres-ff8d4cf48 1 1 1 4m51s\n", + "replicaset.apps/redis-b4756b75d 1 1 1 4m49s\n", + "deployment.apps/feast-example condition met\n" + ] + } + ], + "source": [ + "!kubectl get all\n", + "!kubectl wait --for=condition=available --timeout=8m deployment/feast-example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Validate that the FeatureStore CR is in a `Ready` state." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME STATUS AGE\n", + "example Ready 48m\n" + ] + } + ], + "source": [ + "!kubectl get feast" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Verify that the DB includes the expected tables." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " List of relations\n", + " Schema | Name | Type | Owner \n", + "--------+-------------------------+-------+-------\n", + " public | data_sources | table | feast\n", + " public | entities | table | feast\n", + " public | feast_metadata | table | feast\n", + " public | feature_services | table | feast\n", + " public | feature_views | table | feast\n", + " public | managed_infra | table | feast\n", + " public | on_demand_feature_views | table | feast\n", + " public | permissions | table | feast\n", + " public | projects | table | feast\n", + " public | saved_datasets | table | feast\n", + " public | stream_feature_views | table | feast\n", + " public | validation_references | table | feast\n", + "(12 rows)\n", + "\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/postgres -- psql -h localhost -U feast feast -c '\\dt'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, let's verify the feast version." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "Feast SDK Version: \"0.46.0\"\n" + ] + } + ], + "source": [ + "!kubectl exec deployment/feast-example -itc online -- feast version" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-quickstart/02-Demo.ipynb b/examples/operator-quickstart/02-Demo.ipynb new file mode 100644 index 00000000000..536e36f490f --- /dev/null +++ b/examples/operator-quickstart/02-Demo.ipynb @@ -0,0 +1,669 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run the \"Real-time Credit Scoring\" tutorial" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll use the following tutorial as a demonstration.\n", + "\n", + "https://github.com/feast-dev/feast-credit-score-local-tutorial/tree/598a270353d8a83b37535f849a0fa000a07be8b5" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Check the init container to ensure the repo was successfully cloned with git." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating feast repository...\n", + "git clone https://github.com/feast-dev/feast-credit-score-local-tutorial /feast-data/credit_scoring_local && cd /feast-data/credit_scoring_local && git checkout 598a270\n", + "Cloning into '/feast-data/credit_scoring_local'...\n", + "Updating files: 100% (25/25), done.\n", + "Note: switching to '598a270'.\n", + "\n", + "You are in 'detached HEAD' state. You can look around, make experimental\n", + "changes and commit them, and you can discard any commits you make in this\n", + "state without impacting any branches by switching back to a branch.\n", + "\n", + "If you want to create a new branch to retain commits you create, you may\n", + "do so (now or later) by using -c with the switch command. Example:\n", + "\n", + " git switch -c \n", + "\n", + "Or undo this operation with:\n", + "\n", + " git switch -\n", + "\n", + "Turn off this advice by setting config variable advice.detachedHead to false\n", + "\n", + "HEAD is now at 598a270 set streamlit version to 1.42.0 (#8)\n", + "Feast repo creation complete\n" + ] + } + ], + "source": [ + "!kubectl logs -f deploy/feast-example -c feast-init" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Verify the client `feature_store.yaml`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "project: credit_scoring_local\n", + "provider: local\n", + "offline_store:\n", + " type: duckdb\n", + "online_store:\n", + " type: redis\n", + " connection_string: redis.feast.svc.cluster.local:6379\n", + "registry:\n", + " path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.feast.svc.cluster.local:5432/${POSTGRES_DB}\n", + " registry_type: sql\n", + " cache_ttl_seconds: 60\n", + " sqlalchemy_config_kwargs:\n", + " echo: false\n", + " pool_pre_ping: true\n", + "auth:\n", + " type: no_auth\n", + "entity_key_serialization_version: 3\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- cat feature_store.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Apply the tutorial feature store definitions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Update the feature store definitions for the tutorial." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "No project found in the repository. Using project name credit_scoring_local defined in feature_store.yaml\n", + "Applying changes for project credit_scoring_local\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_store.py:579: RuntimeWarning: On demand feature view is an experimental feature. 
This API is stable, but the functionality does not scale well for offline retrieval\n", + " warnings.warn(\n", + "Deploying infrastructure for \u001b[1m\u001b[32mzipcode_features\u001b[0m\n", + "Deploying infrastructure for \u001b[1m\u001b[32mcredit_history\u001b[0m\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- feast apply" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load data from feature views into the online store, beginning from either the previous materialize or materialize-incremental end date, or the beginning of time." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "Materializing \u001b[1m\u001b[32m2\u001b[0m feature views to \u001b[1m\u001b[32m2025-02-20 21:23:35+00:00\u001b[0m into the \u001b[1m\u001b[32mredis\u001b[0m online store.\n", + "\n", + "\u001b[1m\u001b[32mzipcode_features\u001b[0m from \u001b[1m\u001b[32m2015-02-23 21:24:12+00:00\u001b[0m to \u001b[1m\u001b[32m2025-02-20 21:23:35+00:00\u001b[0m:\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "100%|███████████████████████████████████████████████████████| 28844/28844 [00:28<00:00, 1023.99it/s]\n", + "\u001b[1m\u001b[32mcredit_history\u001b[0m from \u001b[1m\u001b[32m2024-11-22 21:24:43+00:00\u001b[0m to \u001b[1m\u001b[32m2025-02-20 21:23:35+00:00\u001b[0m:\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "0it [00:00, ?it/s]\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- bash -c 'feast materialize-incremental $(date -u +\"%Y-%m-%dT%H:%M:%S\")'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Execute feast commands inside the client Pod" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "List the registered feast projects, feature views, & entities." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "NAME DESCRIPTION TAGS OWNER\n", + "credit_scoring_local {}\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "NAME ENTITIES TYPE\n", + "zipcode_features {'zipcode'} FeatureView\n", + "credit_history {'dob_ssn'} FeatureView\n", + "total_debt_calc {'dob_ssn'} OnDemandFeatureView\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "NAME DESCRIPTION TYPE\n", + "dob_ssn Date of birth and last four digits of social security number ValueType.STRING\n", + "zipcode ValueType.INT64\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- feast projects list\n", + "!kubectl exec deploy/feast-example -itc online -- feast feature-views list\n", + "!kubectl exec deploy/feast-example -itc online -- feast entities list" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train and test the model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install the required packages." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting streamlit==1.42.0 (from -r ../requirements.txt (line 1))\n", + " Obtaining dependency information for streamlit==1.42.0 from https://files.pythonhosted.org/packages/ad/dc/69068179e09488d0833a970d06e8bf40e35669a7bddb8a3caadc13b7dff4/streamlit-1.42.0-py2.py3-none-any.whl.metadata\n", + " Downloading streamlit-1.42.0-py2.py3-none-any.whl.metadata (8.9 kB)\n", + "Collecting shap (from -r ../requirements.txt (line 2))\n", + " Obtaining dependency information for shap from https://files.pythonhosted.org/packages/06/6a/09e3cb9864118337c0f3c2a0dc5add6b642e9f672665062e186d67ba992d/shap-0.46.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading shap-0.46.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (24 kB)\n", + "Requirement already satisfied: pandas in /opt/app-root/lib64/python3.11/site-packages (from -r ../requirements.txt (line 3)) (2.2.3)\n", + "Collecting scikit-learn (from -r ../requirements.txt (line 4))\n", + " Obtaining dependency information for scikit-learn from 
https://files.pythonhosted.org/packages/a8/f3/62fc9a5a659bb58a03cdd7e258956a5824bdc9b4bb3c5d932f55880be569/scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (18 kB)\n", + "Collecting matplotlib (from -r ../requirements.txt (line 5))\n", + " Obtaining dependency information for matplotlib from https://files.pythonhosted.org/packages/b2/7d/2d873209536b9ee17340754118a2a17988bc18981b5b56e6715ee07373ac/matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", + "Collecting altair<6,>=4.0 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for altair<6,>=4.0 from https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl.metadata\n", + " Downloading altair-5.5.0-py3-none-any.whl.metadata (11 kB)\n", + "Collecting blinker<2,>=1.0.0 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for blinker<2,>=1.0.0 from https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl.metadata\n", + " Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB)\n", + "Requirement already satisfied: cachetools<6,>=4.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (5.5.1)\n", + "Requirement already satisfied: click<9,>=7.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (8.1.8)\n", + "Requirement already satisfied: numpy<3,>=1.23 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) 
(1.26.4)\n", + "Requirement already satisfied: packaging<25,>=20 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (24.2)\n", + "Collecting pillow<12,>=7.1.0 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for pillow<12,>=7.1.0 from https://files.pythonhosted.org/packages/48/a4/fbfe9d5581d7b111b28f1d8c2762dee92e9821bb209af9fa83c940e507a0/pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl.metadata\n", + " Downloading pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl.metadata (9.1 kB)\n", + "Requirement already satisfied: protobuf<6,>=3.20 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (5.29.3)\n", + "Requirement already satisfied: pyarrow>=7.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (17.0.0)\n", + "Requirement already satisfied: requests<3,>=2.27 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (2.32.3)\n", + "Requirement already satisfied: rich<14,>=10.14.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (13.9.4)\n", + "Requirement already satisfied: tenacity<10,>=8.1.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (8.5.0)\n", + "Requirement already satisfied: toml<2,>=0.10.1 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (0.10.2)\n", + "Requirement already satisfied: typing-extensions<5,>=4.4.0 in /opt/app-root/lib64/python3.11/site-packages (from streamlit==1.42.0->-r ../requirements.txt (line 1)) (4.12.2)\n", + "Collecting watchdog<7,>=2.1.5 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for watchdog<7,>=2.1.5 from 
https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl.metadata\n", + " Downloading watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl.metadata (44 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.3/44.3 kB\u001b[0m \u001b[31m13.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting gitpython!=3.1.19,<4,>=3.0.7 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for gitpython!=3.1.19,<4,>=3.0.7 from https://files.pythonhosted.org/packages/1d/9a/4114a9057db2f1462d5c8f8390ab7383925fe1ac012eaa42402ad65c2963/GitPython-3.1.44-py3-none-any.whl.metadata\n", + " Downloading GitPython-3.1.44-py3-none-any.whl.metadata (13 kB)\n", + "Collecting pydeck<1,>=0.8.0b4 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for pydeck<1,>=0.8.0b4 from https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl.metadata\n", + " Downloading pydeck-0.9.1-py2.py3-none-any.whl.metadata (4.1 kB)\n", + "Collecting tornado<7,>=6.0.3 (from streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for tornado<7,>=6.0.3 from https://files.pythonhosted.org/packages/22/55/b78a464de78051a30599ceb6983b01d8f732e6f69bf37b4ed07f642ac0fc/tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (2.5 kB)\n", + "Collecting scipy (from shap->-r ../requirements.txt (line 2))\n", + " Obtaining dependency information for scipy from 
https://files.pythonhosted.org/packages/32/ea/564bacc26b676c06a00266a3f25fdfe91a9d9a2532ccea7ce6dd394541bc/scipy-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading scipy-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (61 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.0/62.0 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: tqdm>=4.27.0 in /opt/app-root/lib64/python3.11/site-packages (from shap->-r ../requirements.txt (line 2)) (4.67.1)\n", + "Collecting slicer==0.0.8 (from shap->-r ../requirements.txt (line 2))\n", + " Obtaining dependency information for slicer==0.0.8 from https://files.pythonhosted.org/packages/63/81/9ef641ff4e12cbcca30e54e72fb0951a2ba195d0cda0ba4100e532d929db/slicer-0.0.8-py3-none-any.whl.metadata\n", + " Downloading slicer-0.0.8-py3-none-any.whl.metadata (4.0 kB)\n", + "Collecting numba (from shap->-r ../requirements.txt (line 2))\n", + " Obtaining dependency information for numba from https://files.pythonhosted.org/packages/14/91/18b9f64b34ff318a14d072251480547f89ebfb864b2b7168e5dc5f64f502/numba-0.61.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata\n", + " Downloading numba-0.61.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (2.8 kB)\n", + "Requirement already satisfied: cloudpickle in /opt/app-root/lib64/python3.11/site-packages (from shap->-r ../requirements.txt (line 2)) (3.1.1)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /opt/app-root/lib64/python3.11/site-packages (from pandas->-r ../requirements.txt (line 3)) (2.9.0.post0)\n", + "Requirement already satisfied: pytz>=2020.1 in /opt/app-root/lib64/python3.11/site-packages (from pandas->-r ../requirements.txt (line 3)) (2025.1)\n", + "Requirement already satisfied: tzdata>=2022.7 in /opt/app-root/lib64/python3.11/site-packages (from 
pandas->-r ../requirements.txt (line 3)) (2025.1)\n", + "Collecting joblib>=1.2.0 (from scikit-learn->-r ../requirements.txt (line 4))\n", + " Obtaining dependency information for joblib>=1.2.0 from https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl.metadata\n", + " Downloading joblib-1.4.2-py3-none-any.whl.metadata (5.4 kB)\n", + "Collecting threadpoolctl>=3.1.0 (from scikit-learn->-r ../requirements.txt (line 4))\n", + " Obtaining dependency information for threadpoolctl>=3.1.0 from https://files.pythonhosted.org/packages/4b/2c/ffbf7a134b9ab11a67b0cf0726453cedd9c5043a4fe7a35d1cefa9a1bcfb/threadpoolctl-3.5.0-py3-none-any.whl.metadata\n", + " Downloading threadpoolctl-3.5.0-py3-none-any.whl.metadata (13 kB)\n", + "Collecting contourpy>=1.0.1 (from matplotlib->-r ../requirements.txt (line 5))\n", + " Obtaining dependency information for contourpy>=1.0.1 from https://files.pythonhosted.org/packages/85/fc/7fa5d17daf77306840a4e84668a48ddff09e6bc09ba4e37e85ffc8e4faa3/contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (5.4 kB)\n", + "Collecting cycler>=0.10 (from matplotlib->-r ../requirements.txt (line 5))\n", + " Obtaining dependency information for cycler>=0.10 from https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl.metadata\n", + " Downloading cycler-0.12.1-py3-none-any.whl.metadata (3.8 kB)\n", + "Collecting fonttools>=4.22.0 (from matplotlib->-r ../requirements.txt (line 5))\n", + " Obtaining dependency information for fonttools>=4.22.0 from https://files.pythonhosted.org/packages/28/e9/47c02d5a7027e8ed841ab6a10ca00c93dadd5f16742f1af1fa3f9978adf4/fonttools-4.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading 
fonttools-4.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (101 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m101.9/101.9 kB\u001b[0m \u001b[31m9.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting kiwisolver>=1.3.1 (from matplotlib->-r ../requirements.txt (line 5))\n", + " Obtaining dependency information for kiwisolver>=1.3.1 from https://files.pythonhosted.org/packages/3a/97/5edbed69a9d0caa2e4aa616ae7df8127e10f6586940aa683a496c2c280b9/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.2 kB)\n", + "Collecting pyparsing>=2.3.1 (from matplotlib->-r ../requirements.txt (line 5))\n", + " Obtaining dependency information for pyparsing>=2.3.1 from https://files.pythonhosted.org/packages/1c/a7/c8a2d361bf89c0d9577c934ebb7421b25dc84bf3a8e3ac0a40aed9acc547/pyparsing-3.2.1-py3-none-any.whl.metadata\n", + " Downloading pyparsing-3.2.1-py3-none-any.whl.metadata (5.0 kB)\n", + "Requirement already satisfied: jinja2 in /opt/app-root/lib64/python3.11/site-packages (from altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (3.1.5)\n", + "Requirement already satisfied: jsonschema>=3.0 in /opt/app-root/lib64/python3.11/site-packages (from altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (4.23.0)\n", + "Collecting narwhals>=1.14.2 (from altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for narwhals>=1.14.2 from https://files.pythonhosted.org/packages/ed/ea/dc14822a0a75e027562f081eb638417b1b7845e1e01dd85c5b6573ebf1b2/narwhals-1.27.1-py3-none-any.whl.metadata\n", + " Downloading narwhals-1.27.1-py3-none-any.whl.metadata (10 kB)\n", + "Collecting gitdb<5,>=4.0.1 (from gitpython!=3.1.19,<4,>=3.0.7->streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " 
Obtaining dependency information for gitdb<5,>=4.0.1 from https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl.metadata\n", + " Downloading gitdb-4.0.12-py3-none-any.whl.metadata (1.2 kB)\n", + "Requirement already satisfied: six>=1.5 in /opt/app-root/lib64/python3.11/site-packages (from python-dateutil>=2.8.2->pandas->-r ../requirements.txt (line 3)) (1.17.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/app-root/lib64/python3.11/site-packages (from requests<3,>=2.27->streamlit==1.42.0->-r ../requirements.txt (line 1)) (3.4.1)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/app-root/lib64/python3.11/site-packages (from requests<3,>=2.27->streamlit==1.42.0->-r ../requirements.txt (line 1)) (3.10)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/app-root/lib64/python3.11/site-packages (from requests<3,>=2.27->streamlit==1.42.0->-r ../requirements.txt (line 1)) (2.3.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/app-root/lib64/python3.11/site-packages (from requests<3,>=2.27->streamlit==1.42.0->-r ../requirements.txt (line 1)) (2025.1.31)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /opt/app-root/lib64/python3.11/site-packages (from rich<14,>=10.14.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (3.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /opt/app-root/lib64/python3.11/site-packages (from rich<14,>=10.14.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (2.19.1)\n", + "Collecting llvmlite<0.45,>=0.44.0dev0 (from numba->shap->-r ../requirements.txt (line 2))\n", + " Obtaining dependency information for llvmlite<0.45,>=0.44.0dev0 from https://files.pythonhosted.org/packages/99/fe/d030f1849ebb1f394bb3f7adad5e729b634fb100515594aca25c354ffc62/llvmlite-0.44.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n", + " Downloading 
llvmlite-0.44.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.8 kB)\n", + "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit==1.42.0->-r ../requirements.txt (line 1))\n", + " Obtaining dependency information for smmap<6,>=3.0.1 from https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl.metadata\n", + " Downloading smmap-5.0.2-py3-none-any.whl.metadata (4.3 kB)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /opt/app-root/lib64/python3.11/site-packages (from jinja2->altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (3.0.2)\n", + "Requirement already satisfied: attrs>=22.2.0 in /opt/app-root/lib64/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (25.1.0)\n", + "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /opt/app-root/lib64/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (2024.10.1)\n", + "Requirement already satisfied: referencing>=0.28.4 in /opt/app-root/lib64/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (0.36.2)\n", + "Requirement already satisfied: rpds-py>=0.7.1 in /opt/app-root/lib64/python3.11/site-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (0.22.3)\n", + "Requirement already satisfied: mdurl~=0.1 in /opt/app-root/lib64/python3.11/site-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit==1.42.0->-r ../requirements.txt (line 1)) (0.1.2)\n", + "Downloading streamlit-1.42.0-py2.py3-none-any.whl (9.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.6/9.6 MB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0mm\n", + "\u001b[?25hDownloading shap-0.46.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (540 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m540.2/540.2 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading slicer-0.0.8-py3-none-any.whl (15 kB)\n", + "Downloading scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (13.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.5/13.5 MB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.6/8.6 MB\u001b[0m \u001b[31m3.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading altair-5.5.0-py3-none-any.whl (731 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m731.2/731.2 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading blinker-1.9.0-py3-none-any.whl (8.5 kB)\n", + "Downloading contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (326 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m326.2/326.2 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading cycler-0.12.1-py3-none-any.whl (8.3 kB)\n", + "Downloading fonttools-4.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.9 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.9/4.9 
MB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading GitPython-3.1.44-py3-none-any.whl (207 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.6/207.6 kB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading joblib-1.4.2-py3-none-any.whl (301 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m301.8/301.8 kB\u001b[0m \u001b[31m15.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.4/1.4 MB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl (4.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.5/4.5 MB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0mm\n", + "\u001b[?25hDownloading pydeck-0.9.1-py2.py3-none-any.whl (6.9 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.9/6.9 MB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0mm\n", + "\u001b[?25hDownloading pyparsing-3.2.1-py3-none-any.whl (107 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m107.7/107.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading scipy-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (37.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37.6/37.6 MB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", 
+ "\u001b[?25hDownloading threadpoolctl-3.5.0-py3-none-any.whl (18 kB)\n", + "Downloading tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (437 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m437.2/437.2 kB\u001b[0m \u001b[31m14.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl (79 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.1/79.1 kB\u001b[0m \u001b[31m14.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading numba-0.61.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (3.8 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading gitdb-4.0.12-py3-none-any.whl (62 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading llvmlite-0.44.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (42.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m42.4/42.4 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading narwhals-1.27.1-py3-none-any.whl (308 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m308.8/308.8 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading smmap-5.0.2-py3-none-any.whl (24 kB)\n", + "Installing collected packages: watchdog, tornado, threadpoolctl, smmap, slicer, scipy, pyparsing, pillow, narwhals, llvmlite, kiwisolver, joblib, fonttools, cycler, 
contourpy, blinker, scikit-learn, pydeck, numba, matplotlib, gitdb, shap, gitpython, altair, streamlit\n", + "Successfully installed altair-5.5.0 blinker-1.9.0 contourpy-1.3.1 cycler-0.12.1 fonttools-4.56.0 gitdb-4.0.12 gitpython-3.1.44 joblib-1.4.2 kiwisolver-1.4.8 llvmlite-0.44.0 matplotlib-3.10.0 narwhals-1.27.1 numba-0.61.0 pillow-11.1.0 pydeck-0.9.1 pyparsing-3.2.1 scikit-learn-1.6.1 scipy-1.15.2 shap-0.46.0 slicer-0.0.8 smmap-5.0.2 streamlit-1.42.0 threadpoolctl-3.5.0 tornado-6.4.2 watchdog-6.0.0\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- bash -c 'pip install -r ../requirements.txt'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Train and test the model." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "Loan rejected!\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- bash -c 'cd ../ && python run.py'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Interactive demo (using Streamlit)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In a new terminal, run the following command and leave it active.\n", + "\n", + "```bash\n", + "$ kubectl port-forward deploy/feast-example 8501:8501\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Start the Streamlit application" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Collecting usage statistics. 
To deactivate, set browser.gatherUsageStats to false.\n", + "\u001b[0m\n", + "\u001b[0m\n", + "\u001b[34m\u001b[1m You can now view your Streamlit app in your browser.\u001b[0m\n", + "\u001b[0m\n", + "\u001b[34m Local URL: \u001b[0m\u001b[1mhttp://localhost:8501\u001b[0m\n", + "\u001b[34m Network URL: \u001b[0m\u001b[1mhttp://10.42.0.8:8501\u001b[0m\n", + "\u001b[34m External URL: \u001b[0m\u001b[1mhttp://23.112.66.217:8501\u001b[0m\n", + "\u001b[0m\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "(1000, 22)\n", + "2025-02-20 21:57:48.314 \n", + "Calling `st.pyplot()` without providing a figure argument has been deprecated\n", + "and will be removed in a later version as it requires the use of Matplotlib's\n", + "global figure object, which is not thread-safe.\n", + "\n", + "To future-proof this code, you should pass in a figure as shown below:\n", + "\n", + "```python\n", + "fig, ax = plt.subplots()\n", + "ax.scatter([1, 2, 3], [1, 2, 3])\n", + "# other plotting actions...\n", + "st.pyplot(fig)\n", + "```\n", + "\n", + "If you have a specific use case that requires this functionality, please let us\n", + "know via [issue on Github](https://github.com/streamlit/streamlit/issues).\n", + "\n", + "2025-02-20 21:57:57.474 \n", + "Calling `st.pyplot()` without providing a figure argument has been deprecated\n", + "and will be removed in a later version as it requires the use of Matplotlib's\n", + "global figure object, which is not thread-safe.\n", + "\n", + "To future-proof this code, you should pass in a figure as shown below:\n", + "\n", + "```python\n", + "fig, ax = plt.subplots()\n", + "ax.scatter([1, 2, 3], [1, 2, 3])\n", + "# other plotting actions...\n", + "st.pyplot(fig)\n", + "```\n", + "\n", + "If you have a specific use case that requires this functionality, please let us\n", + "know via [issue on Github](https://github.com/streamlit/streamlit/issues).\n", + "\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: 
DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'zipcode'.\n", + " entity = cls(\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity 'dob_ssn'.\n", + " entity = cls(\n", + "(1000, 22)\n", + "2025-02-20 21:58:34.935 \n", + "Calling `st.pyplot()` without providing a figure argument has been deprecated\n", + "and will be removed in a later version as it requires the use of Matplotlib's\n", + "global figure object, which is not thread-safe.\n", + "\n", + "To future-proof this code, you should pass in a figure as shown below:\n", + "\n", + "```python\n", + "fig, ax = plt.subplots()\n", + "ax.scatter([1, 2, 3], [1, 2, 3])\n", + "# other plotting actions...\n", + "st.pyplot(fig)\n", + "```\n", + "\n", + "If you have a specific use case that requires this functionality, please let us\n", + "know via [issue on Github](https://github.com/streamlit/streamlit/issues).\n", + "\n", + "2025-02-20 21:58:43.709 \n", + "Calling `st.pyplot()` without providing a figure argument has been deprecated\n", + "and will be removed in a later version as it requires the use of Matplotlib's\n", + "global figure object, which is not thread-safe.\n", + "\n", + "To future-proof this code, you should pass in a figure as shown below:\n", + "\n", + "```python\n", + "fig, ax = plt.subplots()\n", + "ax.scatter([1, 2, 3], [1, 2, 3])\n", + "# other plotting actions...\n", + "st.pyplot(fig)\n", + "```\n", + "\n", + "If you have a specific use case that requires this functionality, please let us\n", + "know via [issue on Github](https://github.com/streamlit/streamlit/issues).\n", + "\n" + ] + } + ], + "source": [ + "!kubectl exec deploy/feast-example -itc online -- bash -c 'cd ../ && streamlit run --server.port 8501 streamlit_app.py'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then navigate to the local URL on which Streamlit is being served.\n", + "\n", + "http://localhost:8501" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + 
"name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-quickstart/03-Uninstall.ipynb b/examples/operator-quickstart/03-Uninstall.ipynb new file mode 100644 index 00000000000..3abd489dd58 --- /dev/null +++ b/examples/operator-quickstart/03-Uninstall.ipynb @@ -0,0 +1,103 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Uninstall the Operator and all Feast related objects" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "secret \"feast-data-stores\" deleted\n", + "featurestore.feast.dev \"example\" deleted\n", + "secret \"postgres-secret\" deleted\n", + "deployment.apps \"postgres\" deleted\n", + "service \"postgres\" deleted\n", + "deployment.apps \"redis\" deleted\n", + "service \"redis\" deleted\n", + "namespace \"feast-operator-system\" deleted\n", + "customresourcedefinition.apiextensions.k8s.io \"featurestores.feast.dev\" deleted\n", + "serviceaccount \"feast-operator-controller-manager\" deleted\n", + "role.rbac.authorization.k8s.io \"feast-operator-leader-election-role\" deleted\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-featurestore-editor-role\" deleted\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-featurestore-viewer-role\" deleted\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-manager-role\" deleted\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-metrics-auth-role\" deleted\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-metrics-reader\" deleted\n", + "rolebinding.rbac.authorization.k8s.io \"feast-operator-leader-election-rolebinding\" deleted\n", + "clusterrolebinding.rbac.authorization.k8s.io 
\"feast-operator-manager-rolebinding\" deleted\n", + "clusterrolebinding.rbac.authorization.k8s.io \"feast-operator-metrics-auth-rolebinding\" deleted\n", + "service \"feast-operator-controller-manager-metrics-service\" deleted\n", + "deployment.apps \"feast-operator-controller-manager\" deleted\n" + ] + } + ], + "source": [ + "!kubectl delete -f feast.yaml\n", + "!kubectl delete -f postgres.yaml\n", + "!kubectl delete -f redis.yaml\n", + "!kubectl delete -f ../../infra/feast-operator/dist/install.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Ensure everything has been removed, or is in the process of being terminated." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No resources found in feast namespace.\n" + ] + } + ], + "source": [ + "!kubectl get all" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-quickstart/README.md b/examples/operator-quickstart/README.md new file mode 100644 index 00000000000..56ba7173b56 --- /dev/null +++ b/examples/operator-quickstart/README.md @@ -0,0 +1,7 @@ +# Install and run a Feature Store on Kubernetes with the Feast Operator + +The following notebooks will guide you through how to install and use Feast on Kubernetes with the Feast Go Operator. + +* [01-Install.ipynb](./01-Install.ipynb): Install and configure a Feature Store in Kubernetes with the Operator. 
+* [02-Demo.ipynb](./02-Demo.ipynb): Validate the feature store with demo application. +* [03-Uninstall.ipynb](./03-Uninstall.ipynb): Clear the installed deployments. diff --git a/examples/operator-quickstart/feast.yaml b/examples/operator-quickstart/feast.yaml new file mode 100644 index 00000000000..b665ec5a8bf --- /dev/null +++ b/examples/operator-quickstart/feast.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Secret +metadata: + name: feast-data-stores + namespace: feast +stringData: + redis: | + connection_string: redis.feast.svc.cluster.local:6379 + sql: | + path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.feast.svc.cluster.local:5432/${POSTGRES_DB} + cache_ttl_seconds: 60 + sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true +--- +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: example + namespace: feast +spec: + feastProject: credit_scoring_local + feastProjectDir: + git: + url: https://github.com/feast-dev/feast-credit-score-local-tutorial + ref: 598a270 + services: + offlineStore: + persistence: + file: + type: duckdb + onlineStore: + persistence: + store: + type: redis + secretRef: + name: feast-data-stores + server: + envFrom: + - secretRef: + name: postgres-secret + env: + - name: MPLCONFIGDIR + value: /tmp + resources: + requests: + cpu: 150m + memory: 128Mi + registry: + local: + persistence: + store: + type: sql + secretRef: + name: feast-data-stores diff --git a/examples/operator-quickstart/postgres.yaml b/examples/operator-quickstart/postgres.yaml new file mode 100644 index 00000000000..e37caa16da4 --- /dev/null +++ b/examples/operator-quickstart/postgres.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Secret +metadata: + name: postgres-secret + namespace: feast +stringData: + POSTGRES_DB: feast + POSTGRES_USER: feast + POSTGRES_PASSWORD: feast +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: feast +spec: + replicas: 1 + selector: + matchLabels: + app: postgres 
+ template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: 'postgres:16-alpine' + ports: + - containerPort: 5432 + envFrom: + - secretRef: + name: postgres-secret + volumeMounts: + - mountPath: /var/lib/postgresql + name: postgresdata + volumes: + - name: postgresdata + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: feast + labels: + app: postgres +spec: + type: ClusterIP + ports: + - port: 5432 + targetPort: 5432 + protocol: TCP + selector: + app: postgres \ No newline at end of file diff --git a/examples/operator-quickstart/redis.yaml b/examples/operator-quickstart/redis.yaml new file mode 100644 index 00000000000..5d70b6bd5d6 --- /dev/null +++ b/examples/operator-quickstart/redis.yaml @@ -0,0 +1,39 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + namespace: feast +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: 'bitnami/redis:latest' + ports: + - containerPort: 6379 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: feast + labels: + app: redis +spec: + type: ClusterIP + ports: + - port: 6379 + targetPort: 6379 + protocol: TCP + selector: + app: redis \ No newline at end of file diff --git a/examples/operator-rbac/03-uninstall.ipynb b/examples/operator-rbac/03-uninstall.ipynb new file mode 100644 index 00000000000..f9c794c03f8 --- /dev/null +++ b/examples/operator-rbac/03-uninstall.ipynb @@ -0,0 +1,175 @@ +{ + "cells": [ + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "## Uninstall", + "id": "bd1a081f3f7f5752" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### Uninstall the Operator and all Feast related objects##", + "id": "1175f3d6c5ee9bf0" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": 
"2025-03-05T19:09:52.349677Z", + "start_time": "2025-03-05T19:09:46.308482Z" + } + }, + "cell_type": "code", + "source": [ + "!kubectl delete -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml\n", + "!kubectl delete -f ../../infra/feast-operator/dist/install.yaml" + ], + "id": "f4b4c6fa4a1fe0a8", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "featurestore.feast.dev \"sample-kubernetes-auth\" deleted\r\n", + "namespace \"feast-operator-system\" deleted\r\n", + "customresourcedefinition.apiextensions.k8s.io \"featurestores.feast.dev\" deleted\r\n", + "serviceaccount \"feast-operator-controller-manager\" deleted\r\n", + "role.rbac.authorization.k8s.io \"feast-operator-leader-election-role\" deleted\r\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-featurestore-editor-role\" deleted\r\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-featurestore-viewer-role\" deleted\r\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-manager-role\" deleted\r\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-metrics-auth-role\" deleted\r\n", + "clusterrole.rbac.authorization.k8s.io \"feast-operator-metrics-reader\" deleted\r\n", + "rolebinding.rbac.authorization.k8s.io \"feast-operator-leader-election-rolebinding\" deleted\r\n", + "clusterrolebinding.rbac.authorization.k8s.io \"feast-operator-manager-rolebinding\" deleted\r\n", + "clusterrolebinding.rbac.authorization.k8s.io \"feast-operator-metrics-auth-rolebinding\" deleted\r\n", + "service \"feast-operator-controller-manager-metrics-service\" deleted\r\n", + "deployment.apps \"feast-operator-controller-manager\" deleted\r\n" + ] + } + ], + "execution_count": 6 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Uninstall Client Related Objects", + "id": "2a2aa884aeddfb99" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-05T19:09:54.655575Z", + "start_time": 
"2025-03-05T19:09:53.553918Z" + } + }, + "cell_type": "code", + "source": [ + "!echo \"Deleting RoleBindings...\"\n", + "!kubectl delete rolebinding feast-user-rolebinding -n feast --ignore-not-found\n", + "!kubectl delete rolebinding feast-admin-rolebinding -n feast --ignore-not-found\n", + "\n", + "!echo \"Deleting ServiceAccounts...\"\n", + "!kubectl delete serviceaccount feast-user-sa -n feast --ignore-not-found\n", + "!kubectl delete serviceaccount feast-admin-sa -n feast --ignore-not-found\n", + "!kubectl delete serviceaccount feast-unauthorized-user-sa -n feast --ignore-not-found\n" + ], + "id": "6ce30879d64bbd06", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Deleting RoleBindings...\r\n", + "rolebinding.rbac.authorization.k8s.io \"feast-user-rolebinding\" deleted\r\n", + "rolebinding.rbac.authorization.k8s.io \"feast-admin-rolebinding\" deleted\r\n", + "Deleting ServiceAccounts...\r\n", + "serviceaccount \"feast-user-sa\" deleted\r\n", + "serviceaccount \"feast-admin-sa\" deleted\r\n", + "serviceaccount \"feast-unauthorized-user-sa\" deleted\r\n" + ] + } + ], + "execution_count": 7 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Ensure everything has been removed, or is in the process of being terminated.", + "id": "638421caa8ff849e" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-05T19:09:59.868383Z", + "start_time": "2025-03-05T19:09:59.611048Z" + } + }, + "cell_type": "code", + "source": "!kubectl get all -n feast\n", + "id": "587eb85352a8a353", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No resources found in feast namespace.\r\n" + ] + } + ], + "execution_count": 8 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-05T19:10:07.846749Z", + "start_time": "2025-03-05T19:10:02.561070Z" + } + }, + "cell_type": "code", + "source": "!kubectl delete namespace feast", + "id": "7a0ce2d9e4a92828", + "outputs": [ + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "namespace \"feast\" deleted\r\n" + ] + } + ], + "execution_count": 9 + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "", + "id": "10707783148c5f8d" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/operator-rbac/1-setup-operator-rbac.ipynb b/examples/operator-rbac/1-setup-operator-rbac.ipynb new file mode 100644 index 00000000000..69cc285a01c --- /dev/null +++ b/examples/operator-rbac/1-setup-operator-rbac.ipynb @@ -0,0 +1,760 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Feast Operator with RBAC Configuration\n", + "## Objective\n", + "\n", + "This demo provides a reference implementation of a runbook on how to enable Role-Based Access Control (RBAC) for Feast using the Feast Operator with the Kubernetes authentication type. This serves as useful reference material for a cluster admin / MLOps engineer.\n", + "\n", + "The demo steps include deploying the Feast Operator, creating Feast instances with server components (registry, offline store, online store), and Feast client testing locally. 
The goal is to ensure secure access control for Feast instances deployed by the Feast Operator.\n", + " \n", + "Please read these reference documents for understanding the Feast RBAC framework.\n", + "- [RBAC Architecture](https://docs.feast.dev/v/master/getting-started/architecture/rbac) \n", + "- [RBAC Permission](https://docs.feast.dev/v/master/getting-started/concepts/permission).\n", + "- [RBAC Authorization Manager](https://docs.feast.dev/v/master/getting-started/components/authz_manager)\n" + ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Deployment Architecture\n", + "In this notebook, we will deploy a distributed topology of Feast services, which includes:\n", + "\n", + "* `Registry Server`: Handles metadata storage for feature definitions.\n", + "* `Online Store Server`: Uses the `Registry Server` to query metadata and is responsible for low-latency serving of features.\n", + "* `Offline Store Server`: Uses the `Registry Server` to query metadata and provides access to batch data for historical feature retrieval.\n", + "\n", + "Additionally, we will cover:\n", + "* RBAC Configuration with Kubernetes Authentication for Feast resources." + ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Prerequisites\n", + "* Kubernetes Cluster\n", + "* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) Kubernetes CLI tool." + ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Install Prerequisites\n", + "The following commands install and configure all the prerequisites on a MacOS environment. You can find the\n", + "equivalent instructions on the offical documentation pages:\n", + "* Install the `kubectl` cli.\n", + "* Install Kubernetes and Container runtime (e.g. 
[Colima](https://github.com/abiosoft/colima)).\n", + " * Alternatively, authenticate to an existing Kubernetes or OpenShift cluster.\n", + " \n", + "```bash\n", + "brew install colima kubectl\n", + "colima start -r containerd -k -m 3 -d 100 -c 2 --cpu-type max -a x86_64\n", + "colima list\n", + "```" + ] + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:27:31.474254Z", + "start_time": "2025-03-06T18:27:31.012088Z" + } + }, + "cell_type": "code", + "source": [ + "!kubectl create ns feast\n", + "!kubectl config set-context --current --namespace feast" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast created\r\n", + "Context \"kind-kind\" modified.\r\n" + ] + } + ], + "execution_count": 1 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Validate the cluster setup:" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:32:23.198122Z", + "start_time": "2025-03-06T18:32:22.930547Z" + } + }, + "cell_type": "code", + "source": "!kubectl get ns feast", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME STATUS AGE\r\n", + "feast Active 4m52s\r\n" + ] + } + ], + "execution_count": 2 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Feast Admin Steps:\n", + "Feast Admins or MLOps Engineers may require Kubernetes Cluster Admin roles when working with OpenShift or Kubernetes clusters. Below is the list of steps Required to set up Feast RBAC with the Operator by an Admin or MLOps Engineer.\n", + "\n", + "1. **Install the Feast Operator**\n", + "2. **Install the Feast services via FeatureStore CR**\n", + "3. **Configure the RBAC Permissions**\n", + "4. **Perform Feast Apply**\n", + "5. 
**Setting Service Account and Role Binding**\n", + "\n", + "## Install the Feast Operator" + ] + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:32:40.721042Z", + "start_time": "2025-03-06T18:32:28.484245Z" + } + }, + "cell_type": "code", + "source": [ + "## Use this install command from a stable branch \n", + "!kubectl apply -f ../../infra/feast-operator/dist/install.yaml\n", + "\n", + "## OR, for the latest code/builds, use one the following commands from the 'master' branch\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:develop FS_IMG=quay.io/feastdev-ci/feature-server:develop\n", + "# !make -C ../../infra/feast-operator install deploy IMG=quay.io/feastdev-ci/feast-operator:$(git rev-parse HEAD) FS_IMG=quay.io/feastdev-ci/feature-server:$(git rev-parse HEAD)\n", + "\n", + "!kubectl wait --for=condition=available --timeout=5m deployment/feast-operator-controller-manager -n feast-operator-system" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/feast-operator-system created\r\n", + "customresourcedefinition.apiextensions.k8s.io/featurestores.feast.dev created\r\n", + "serviceaccount/feast-operator-controller-manager created\r\n", + "role.rbac.authorization.k8s.io/feast-operator-leader-election-role created\r\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-editor-role created\r\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-featurestore-viewer-role created\r\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-manager-role created\r\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-auth-role created\r\n", + "clusterrole.rbac.authorization.k8s.io/feast-operator-metrics-reader created\r\n", + "rolebinding.rbac.authorization.k8s.io/feast-operator-leader-election-rolebinding created\r\n", + "clusterrolebinding.rbac.authorization.k8s.io/feast-operator-manager-rolebinding created\r\n", + 
"clusterrolebinding.rbac.authorization.k8s.io/feast-operator-metrics-auth-rolebinding created\r\n", + "service/feast-operator-controller-manager-metrics-service created\r\n", + "deployment.apps/feast-operator-controller-manager created\r\n", + "deployment.apps/feast-operator-controller-manager condition met\r\n" + ] + } + ], + "execution_count": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install the Feast services via FeatureStore CR\n", + "Next, we'll use the running Feast Operator to install the feast services with Server components online, offline, registry with kubernetes Authorization set. Apply the included [reference deployment](../../infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml) to install and configure Feast with kubernetes Authorization ." + ] + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:34:39.847211Z", + "start_time": "2025-03-06T18:34:39.378680Z" + } + }, + "source": [ + "!cat ../../infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml\n", + "!kubectl apply -f ../../infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml -n feast" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "apiVersion: feast.dev/v1alpha1\r\n", + "kind: FeatureStore\r\n", + "metadata:\r\n", + " name: sample-kubernetes-auth\r\n", + "spec:\r\n", + " feastProject: feast_rbac\r\n", + " authz:\r\n", + " kubernetes:\r\n", + " roles:\r\n", + " - feast-writer\r\n", + " - feast-reader\r\n", + " services:\r\n", + " offlineStore:\r\n", + " server: {}\r\n", + " onlineStore:\r\n", + " server: {}\r\n", + " registry:\r\n", + " local:\r\n", + " server: {}\r\n", + " ui: {}\r\n", + "featurestore.feast.dev/sample-kubernetes-auth created\r\n" + ] + } + ], + "execution_count": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Validate the running FeatureStore deployment\n", + 
"Validate the deployment status." + ] + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:35:05.202176Z", + "start_time": "2025-03-06T18:35:02.498106Z" + } + }, + "source": [ + "!kubectl get all\n", + "!kubectl wait --for=condition=available --timeout=8m deployment/feast-sample-kubernetes-auth" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME READY STATUS RESTARTS AGE\r\n", + "pod/feast-sample-kubernetes-auth-774f6df8df-95nc6 0/4 Running 0 22s\r\n", + "\r\n", + "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\r\n", + "service/feast-sample-kubernetes-auth-offline ClusterIP 10.96.38.230 80/TCP 22s\r\n", + "service/feast-sample-kubernetes-auth-online ClusterIP 10.96.140.194 80/TCP 22s\r\n", + "service/feast-sample-kubernetes-auth-registry ClusterIP 10.96.140.31 80/TCP 22s\r\n", + "service/feast-sample-kubernetes-auth-ui ClusterIP 10.96.26.21 80/TCP 22s\r\n", + "\r\n", + "NAME READY UP-TO-DATE AVAILABLE AGE\r\n", + "deployment.apps/feast-sample-kubernetes-auth 0/1 1 0 22s\r\n", + "\r\n", + "NAME DESIRED CURRENT READY AGE\r\n", + "replicaset.apps/feast-sample-kubernetes-auth-774f6df8df 1 1 0 22s\r\n", + "deployment.apps/feast-sample-kubernetes-auth condition met\r\n" + ] + } + ], + "execution_count": 5 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Validate that the FeatureStore CR is in a `Ready` state." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:35:55.728523Z", + "start_time": "2025-03-06T18:35:55.452894Z" + } + }, + "source": [ + "!kubectl get feast" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "NAME STATUS AGE\r\n", + "sample-kubernetes-auth Ready 76s\r\n" + ] + } + ], + "execution_count": 6 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Configure the RBAC Permissions\n", + "As we have created Kubernetes roles in FeatureStore CR to manage access control for Feast objects, the Python script `permissions_apply.py` will apply these roles to configure permissions. See the detailed code example below with comments." + ] + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:37:17.062072Z", + "start_time": "2025-03-06T18:37:16.930026Z" + } + }, + "cell_type": "code", + "source": [ + "#view the permissions \n", + "!cat permissions_apply.py" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Necessary modules for permissions and policies in Feast for RBAC\r\n", + "from feast.feast_object import ALL_RESOURCE_TYPES\r\n", + "from feast.permissions.action import READ, AuthzedAction, ALL_ACTIONS\r\n", + "from feast.permissions.permission import Permission\r\n", + "from feast.permissions.policy import RoleBasedPolicy\r\n", + "\r\n", + "# Define K8s roles same as created with FeatureStore CR\r\n", + "admin_roles = [\"feast-writer\"] # Full access (can create, update, delete ) Feast Resources\r\n", + "user_roles = [\"feast-reader\"] # Read-only access on Feast Resources\r\n", + "\r\n", + "# User permissions (feast_user_permission)\r\n", + "# - Grants read and describing Feast objects access\r\n", + "user_perm = Permission(\r\n", + " name=\"feast_user_permission\",\r\n", + " types=ALL_RESOURCE_TYPES,\r\n", + " policy=RoleBasedPolicy(roles=user_roles),\r\n", + " actions=[AuthzedAction.DESCRIBE] + READ # 
Read access (READ_ONLINE, READ_OFFLINE) + describe other Feast Resources.\r\n", + ")\r\n", + "\r\n", + "# Admin permissions (feast_admin_permission)\r\n", + "# - Grants full control over all resources\r\n", + "admin_perm = Permission(\r\n", + " name=\"feast_admin_permission\",\r\n", + " types=ALL_RESOURCE_TYPES,\r\n", + " policy=RoleBasedPolicy(roles=admin_roles),\r\n", + " actions=ALL_ACTIONS # Full permissions: CREATE, UPDATE, DELETE, READ, WRITE\r\n", + ")\r\n" + ] + } + ], + "execution_count": 7 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:37:31.662484Z", + "start_time": "2025-03-06T18:37:31.139869Z" + } + }, + "cell_type": "code", + "source": [ + "# Copy the Permissions to the pods under feature_repo directory\n", + "!kubectl cp permissions_apply.py $(kubectl get pods -l 'feast.dev/name=sample-kubernetes-auth' -ojsonpath=\"{.items[*].metadata.name}\"):/feast-data/feast_rbac/feature_repo -c online" + ], + "outputs": [], + "execution_count": 8 + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:37:38.003082Z", + "start_time": "2025-03-06T18:37:37.662378Z" + } + }, + "source": [ + "#view the feature_store.yaml configuration \n", + "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- cat feature_store.yaml" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "project: feast_rbac\r\n", + "provider: local\r\n", + "offline_store:\r\n", + " type: dask\r\n", + "online_store:\r\n", + " path: /feast-data/online_store.db\r\n", + " type: sqlite\r\n", + "registry:\r\n", + " path: /feast-data/registry.db\r\n", + " registry_type: file\r\n", + "auth:\r\n", + " type: kubernetes\r\n", + "entity_key_serialization_version: 3\r\n" + ] + } + ], + "execution_count": 9 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Apply the Permissions and Feast Object to Registry" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": 
"2025-03-06T18:37:56.131390Z", + "start_time": "2025-03-06T18:37:45.483916Z" + } + }, + "cell_type": "code", + "source": "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- feast apply", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ": MADV_DONTNEED does not work (memset will be used instead)\r\n", + ": (This is the expected behaviour if you are running under QEMU)\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\r\n", + " DUMMY_ENTITY = Entity(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_len\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/feast-data/feast_rbac/feature_repo/example_repo.py:27: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'driver'.\r\n", + " driver = Entity(name=\"driver\", join_keys=[\"driver_id\"])\r\n", + "Applying changes for project feast_rbac\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_store.py:579: RuntimeWarning: On demand feature view is an experimental feature. 
This API is stable, but the functionality does not scale well for offline retrieval\r\n", + " warnings.warn(\r\n", + "Created project \u001B[1m\u001B[32mfeast_rbac\u001B[0m\r\n", + "Created entity \u001B[1m\u001B[32mdriver\u001B[0m\r\n", + "Created feature view \u001B[1m\u001B[32mdriver_hourly_stats\u001B[0m\r\n", + "Created feature view \u001B[1m\u001B[32mdriver_hourly_stats_fresh\u001B[0m\r\n", + "Created on demand feature view \u001B[1m\u001B[32mtransformed_conv_rate\u001B[0m\r\n", + "Created on demand feature view \u001B[1m\u001B[32mtransformed_conv_rate_fresh\u001B[0m\r\n", + "Created feature service \u001B[1m\u001B[32mdriver_activity_v2\u001B[0m\r\n", + "Created feature service \u001B[1m\u001B[32mdriver_activity_v1\u001B[0m\r\n", + "Created feature service \u001B[1m\u001B[32mdriver_activity_v3\u001B[0m\r\n", + "Created permission \u001B[1m\u001B[32mfeast_admin_permission\u001B[0m\r\n", + "Created permission \u001B[1m\u001B[32mfeast_user_permission\u001B[0m\r\n", + "\r\n", + "Created sqlite table \u001B[1m\u001B[32mfeast_rbac_driver_hourly_stats_fresh\u001B[0m\r\n", + "Created sqlite table \u001B[1m\u001B[32mfeast_rbac_driver_hourly_stats\u001B[0m\r\n", + "\r\n" + ] + } + ], + "execution_count": 10 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "**List the applied permission details permissions on Feast Resources.**" + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:38:45.881715Z", + "start_time": "2025-03-06T18:38:04.170364Z" + } + }, + "source": [ + "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- feast permissions list-roles\n", + "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- feast permissions list\n", + "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- feast permissions describe feast_admin_permission\n", + "!kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- feast permissions describe feast_user_permission" + ], + "outputs": [ 
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + ": MADV_DONTNEED does not work (memset will be used instead)\r\n", + ": (This is the expected behaviour if you are running under QEMU)\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\r\n", + " DUMMY_ENTITY = Entity(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_len\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "+--------------+\r\n", + "| ROLE NAME |\r\n", + "+==============+\r\n", + "| feast-reader |\r\n", + "+--------------+\r\n", + "| feast-writer |\r\n", + "+--------------+\r\n", + ": MADV_DONTNEED does not work (memset will be used instead)\r\n", + ": (This is the expected behaviour if you are running under QEMU)\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\r\n", + " DUMMY_ENTITY = Entity(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_len\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "NAME TYPES NAME_PATTERNS ACTIONS ROLES REQUIRED_TAGS\r\n", + "feast_admin_permission Project - CREATE feast-writer -\r\n", + " FeatureView DESCRIBE\r\n", + " OnDemandFeatureView UPDATE\r\n", + " BatchFeatureView DELETE\r\n", + " StreamFeatureView READ_ONLINE\r\n", + " Entity READ_OFFLINE\r\n", + " FeatureService WRITE_ONLINE\r\n", + " DataSource WRITE_OFFLINE\r\n", + " ValidationReference\r\n", + " SavedDataset\r\n", + " Permission\r\n", + "feast_user_permission Project - DESCRIBE feast-reader -\r\n", + " FeatureView READ_OFFLINE\r\n", + " OnDemandFeatureView READ_ONLINE\r\n", + " BatchFeatureView\r\n", + " StreamFeatureView\r\n", + " Entity\r\n", + " FeatureService\r\n", + " DataSource\r\n", + " ValidationReference\r\n", + " SavedDataset\r\n", + " Permission\r\n", + ": MADV_DONTNEED does not work (memset will be used instead)\r\n", + ": (This is the expected behaviour if you are running under QEMU)\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\r\n", + " DUMMY_ENTITY = Entity(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_len\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "spec:\r\n", + " name: feast_admin_permission\r\n", + " types:\r\n", + " - PROJECT\r\n", + " - FEATURE_VIEW\r\n", + " - ON_DEMAND_FEATURE_VIEW\r\n", + " - BATCH_FEATURE_VIEW\r\n", + " - STREAM_FEATURE_VIEW\r\n", + " - ENTITY\r\n", + " - FEATURE_SERVICE\r\n", + " - DATA_SOURCE\r\n", + " - VALIDATION_REFERENCE\r\n", + " - SAVED_DATASET\r\n", + " - PERMISSION\r\n", + " actions:\r\n", + " - CREATE\r\n", + " - DESCRIBE\r\n", + " - UPDATE\r\n", + " - DELETE\r\n", + " - READ_ONLINE\r\n", + " - READ_OFFLINE\r\n", + " - WRITE_ONLINE\r\n", + " - WRITE_OFFLINE\r\n", + " policy:\r\n", + " roleBasedPolicy:\r\n", + " roles:\r\n", + " - feast-writer\r\n", + "meta:\r\n", + " createdTimestamp: '2025-03-06T18:37:55.742625Z'\r\n", + " lastUpdatedTimestamp: '2025-03-06T18:37:55.742625Z'\r\n", + "\r\n", + ": MADV_DONTNEED does not work (memset will be used instead)\r\n", + ": (This is the expected behaviour if you are running under QEMU)\r\n", + "/opt/app-root/lib64/python3.11/site-packages/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. 
Please specify a value_type for entity '__dummy'.\r\n", + " DUMMY_ENTITY = Entity(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "/opt/app-root/lib64/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_len\" in \"SqliteOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\r\n", + " warnings.warn(\r\n", + "spec:\r\n", + " name: feast_user_permission\r\n", + " types:\r\n", + " - PROJECT\r\n", + " - FEATURE_VIEW\r\n", + " - ON_DEMAND_FEATURE_VIEW\r\n", + " - BATCH_FEATURE_VIEW\r\n", + " - STREAM_FEATURE_VIEW\r\n", + " - ENTITY\r\n", + " - FEATURE_SERVICE\r\n", + " - DATA_SOURCE\r\n", + " - VALIDATION_REFERENCE\r\n", + " - SAVED_DATASET\r\n", + " - PERMISSION\r\n", + " actions:\r\n", + " - DESCRIBE\r\n", + " - READ_OFFLINE\r\n", + " - READ_ONLINE\r\n", + " policy:\r\n", + " roleBasedPolicy:\r\n", + " roles:\r\n", + " - feast-reader\r\n", + "meta:\r\n", + " createdTimestamp: '2025-03-06T18:37:55.743643Z'\r\n", + " lastUpdatedTimestamp: '2025-03-06T18:37:55.743643Z'\r\n", + "\r\n" + ] + } + ], + "execution_count": 11 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Setting Up Service Account and RoleBinding \n", + "The steps below will:\n", + "- Create **three different ServiceAccounts** for Feast.\n", + "- Assign appropriate **RoleBindings** for access control." 
+ ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Test Cases\n", + "| User Type | ServiceAccount | RoleBinding Assigned | Expected Behavior in output |\n", + "|----------------|-----------------------------|----------------------|------------------------------------------------------------|\n", + "| **Read-Only** | `feast-user-sa` | `feast-reader` | Can **read** from the feature store, but **cannot write**. |\n", + "| **Unauthorized** | `feast-unauthorized-user-sa` | _None_ | **Access should be denied** in `test.py`. |\n", + "| **Admin** | `feast-admin-sa` | `feast-writer` | Can **read and write** feature store data. |" + ] + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### Setup Read-Only Feast User the ServiceAccount and Role Binding (serviceaccount: feast-user-sa, role: feast-reader)" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:42:04.122440Z", + "start_time": "2025-03-06T18:42:03.397214Z" + } + }, + "cell_type": "code", + "source": [ + "# Step 1: Create the ServiceAccount\n", + "!echo \"Creating ServiceAccount: feast-user-sa\"\n", + "!kubectl create serviceaccount feast-user-sa -n feast\n", + "\n", + "# Step 2: Assign RoleBinding (Read-Only Access for Feast)\n", + "!echo \"Assigning Read-Only RoleBinding: feast-user-rolebinding\"\n", + "!kubectl create rolebinding feast-user-rolebinding --role=feast-reader --serviceaccount=feast:feast-user-sa -n feast" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating ServiceAccount: feast-user-sa\r\n", + "serviceaccount/feast-user-sa created\r\n", + "Assigning Read-Only RoleBinding: feast-user-rolebinding\r\n", + "rolebinding.rbac.authorization.k8s.io/feast-user-rolebinding created\r\n" + ] + } + ], + "execution_count": 12 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### Setup Unauthorized Feast User (serviceaccount: 
feast-unauthorized-user-sa, role: None)" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:42:07.992216Z", + "start_time": "2025-03-06T18:42:07.721628Z" + } + }, + "cell_type": "code", + "source": [ + "# Create the ServiceAccount (Without RoleBinding)\n", + "!echo \"Creating Unauthorized ServiceAccount: feast-unauthorized-user-sa\"\n", + "!kubectl create serviceaccount feast-unauthorized-user-sa -n feast\n" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating Unauthorized ServiceAccount: feast-unauthorized-user-sa\r\n", + "serviceaccount/feast-unauthorized-user-sa created\r\n" + ] + } + ], + "execution_count": 13 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Setup Test Admin Feast User (serviceaccount: feast-admin-sa, role: feast-writer)" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:42:11.651408Z", + "start_time": "2025-03-06T18:42:11.097231Z" + } + }, + "cell_type": "code", + "source": [ + "# Create the ServiceAccount\n", + "!echo \"Creating ServiceAccount: feast-admin-sa\"\n", + "!kubectl create serviceaccount feast-admin-sa -n feast\n", + "\n", + "# Assign RoleBinding (Admin Access for Feast)\n", + "!echo \"Assigning Admin RoleBinding: feast-admin-rolebinding\"\n", + "!kubectl create rolebinding feast-admin-rolebinding --role=feast-writer --serviceaccount=feast:feast-admin-sa -n feast\n" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating ServiceAccount: feast-admin-sa\r\n", + "serviceaccount/feast-admin-sa created\r\n", + "Assigning Admin RoleBinding: feast-admin-rolebinding\r\n", + "rolebinding.rbac.authorization.k8s.io/feast-admin-rolebinding created\r\n" + ] + } + ], + "execution_count": 14 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "[Next Run Client notebook](./2-client.ipynb)" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": 
"python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/operator-rbac/2-client.ipynb b/examples/operator-rbac/2-client.ipynb new file mode 100644 index 00000000000..cf9d57cb5bc --- /dev/null +++ b/examples/operator-rbac/2-client.ipynb @@ -0,0 +1,828 @@ +{ + "cells": [ + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Feast Client with RBAC\n", + "### Kubernetes RBAC Authorization\n", + "\n", + "## Feast Role-Based Access Control (RBAC) in Kubernetes \n", + "\n", + "Feast **Role-Based Access Control (RBAC)** in Kubernetes supports authentication both **inside a Kubernetes pod** and for **external clients** using the `LOCAL_K8S_TOKEN` environment variable. \n", + "\n", + "\n", + "### Inside a Kubernetes Pod\n", + "Feast automatically retrieves the Kubernetes ServiceAccount token from:\n", + "```\n", + "/var/run/secrets/kubernetes.io/serviceaccount/token\n", + "```\n", + "This means:\n", + "- No manual configuration is needed inside a pod.\n", + "- The token is mounted automatically and used for authentication.\n", + "- Developer just need create the binding with role and service account accordingly.\n", + "- Code Reference: \n", + "[Feast Kubernetes Auth Client Manager (Pod Token Usage)](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/permissions/client/kubernetes_auth_client_manager.py#L15) \n", + "- Using a service account from a pod [Example](https://github.com/feast-dev/feast/blob/master/examples/rbac-remote/client/k8s/)\n", + "\n", + "### Outside a Kubernetes Pod (External Clients & Local Testing)\n", + " \n", + "If running Feast outside of Kubernetes, authentication requires setting the token manually to the environment variable 
`LOCAL_K8S_TOKEN` :\n", + "```sh\n", + "export LOCAL_K8S_TOKEN=\"your-service-account-token\"\n", + "```\n", + "\n", + "For more details, refer the user guide: [Kubernetes RBAC Authorization](https://docs.feast.dev/master/getting-started/components/authz_manager#kubernetes-rbac-authorization) \n" + ], + "id": "bb0145c9c1f6ebcc" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Test Cases\n", + "| User Type | ServiceAccount | RoleBinding Assigned | Expected Behavior in output |\n", + "|----------------|-----------------------------|----------------------|------------------------------------------------------------|\n", + "| **Read-Only** | `feast-user-sa` | `feast-reader` | Can **read** from the feature store, but **cannot write**. |\n", + "| **Unauthorized** | `feast-unauthorized-user-sa` | _None_ | **Access should be denied** in `test.py`. |\n", + "| **Admin** | `feast-admin-sa` | `feast-writer` | Can **read and write** feature store data. |" + ], + "id": "160681ba4ab3c2c5" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### Feature Store settings", + "id": "6590c081efb1fe3c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:47:45.151296Z", + "start_time": "2025-03-06T18:47:45.024854Z" + } + }, + "cell_type": "code", + "source": "!cat client/feature_store.yaml", + "id": "fac5f67ff391b5cf", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "project: feast_rbac\r\n", + "provider: local\r\n", + "offline_store:\r\n", + " host: localhost\r\n", + " type: remote\r\n", + " port: 8081\r\n", + "online_store:\r\n", + " path: http://localhost:8082\r\n", + " type: remote\r\n", + "registry:\r\n", + " path: localhost:8083\r\n", + " registry_type: remote\r\n", + "auth:\r\n", + " type: kubernetes\r\n", + "entity_key_serialization_version: 3\r\n" + ] + } + ], + "execution_count": 1 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "**The Operator client feature store 
ConfigMap** containing the `feature_store.yaml` settings. We can retrieve it and port forward to local as we are testing locally.", + "id": "84f73e09711bff9f" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:46:36.029308Z", + "start_time": "2025-03-06T18:46:35.712532Z" + } + }, + "cell_type": "code", + "source": "!kubectl get configmap feast-sample-kubernetes-auth-client -n feast -o jsonpath='{.data.feature_store\.yaml}' ", + "id": "456fb4df46f32380", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "project: feast_rbac\r\n", + "provider: local\r\n", + "offline_store:\r\n", + " host: feast-sample-kubernetes-auth-offline.feast.svc.cluster.local\r\n", + " type: remote\r\n", + " port: 80\r\n", + "online_store:\r\n", + " path: http://feast-sample-kubernetes-auth-online.feast.svc.cluster.local:80\r\n", + " type: remote\r\n", + "registry:\r\n", + " path: feast-sample-kubernetes-auth-registry.feast.svc.cluster.local:80\r\n", + " registry_type: remote\r\n", + "auth:\r\n", + " type: kubernetes\r\n", + "entity_key_serialization_version: 3\r\n" + ] + } + ], + "execution_count": 34 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### The function below is executed to support the preparation of client testing.", + "id": "ae61f4dca31f3466" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Run Port Forwarding for All Services for local testing ", + "id": "28636825ae8f676d" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:47:55.237205Z", + "start_time": "2025-03-06T18:47:55.226143Z" + } + }, + "cell_type": "code", + "source": [ + "import subprocess\n", + "\n", + "# Define services and their local ports\n", + "services = {\n", + " \"offline_store\": (\"feast-sample-kubernetes-auth-offline\", 8081),\n", + " \"online_store\": (\"feast-sample-kubernetes-auth-online\", 8082),\n", + " \"registry\": (\"feast-sample-kubernetes-auth-registry\", 8083),\n", + "}\n", + "\n", + "# 
Start port-forwarding for each service\n", + "port_forward_processes = {}\n", + "for name, (service, local_port) in services.items():\n", + " cmd = f\"kubectl port-forward svc/{service} -n feast {local_port}:80\"\n", + " process = subprocess.Popen(cmd, shell=True)\n", + " port_forward_processes[name] = process\n", + " print(f\"Port forwarding {service} -> localhost:{local_port}\")" + ], + "id": "c014248190863e8a", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Port forwarding feast-sample-kubernetes-auth-offline -> localhost:8081\n", + "Port forwarding feast-sample-kubernetes-auth-online -> localhost:8082\n", + "Port forwarding feast-sample-kubernetes-auth-registry -> localhost:8083\n" + ] + } + ], + "execution_count": 2 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Function to retrieve a Kubernetes service account token and set it as an environment variable", + "id": "c0eccef6379f442c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T18:48:00.150752Z", + "start_time": "2025-03-06T18:48:00.143370Z" + } + }, + "cell_type": "code", + "source": [ + "import subprocess\n", + "import os\n", + "\n", + "def get_k8s_token(service_account):\n", + " namespace = \"feast\"\n", + "\n", + " if not service_account:\n", + " raise ValueError(\"Service account name is required.\")\n", + "\n", + " result = subprocess.run(\n", + " [\"kubectl\", \"create\", \"token\", service_account, \"-n\", namespace],\n", + " capture_output=True, text=True, check=True\n", + " )\n", + "\n", + " token = result.stdout.strip()\n", + "\n", + " if not token:\n", + " return None # Silently return None if token retrieval fails\n", + "\n", + " os.environ[\"LOCAL_K8S_TOKEN\"] = token\n", + " return \"Token Retrieved: ***** (hidden for security)\"\n" + ], + "id": "70bdbcd7b3fe44", + "outputs": [], + "execution_count": 3 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "**Generating training data**. 
The following test functions were copied from the `test_workflow.py` template but we added `try` blocks to print only \n", + "the relevant error messages, since we expect to receive errors from the permission enforcement modules." + ], + "id": "8c9e27ec4ed8ca2c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:16:04.254201Z", + "start_time": "2025-03-06T20:16:04.245605Z" + } + }, + "cell_type": "code", + "source": [ + "from feast import FeatureStore\n", + "from feast.data_source import PushMode\n", + "from datetime import datetime\n", + "import pandas as pd\n", + "\n", + "# Initialize Feature Store\n", + "store = FeatureStore(repo_path=\"client\")\n", + "\n", + "def fetch_historical_features_entity_df(store: FeatureStore, for_batch_scoring: bool):\n", + " \"\"\"Fetch historical features for training or batch scoring.\"\"\"\n", + " try:\n", + " entity_df = pd.DataFrame.from_dict(\n", + " {\n", + " \"driver_id\": [1001, 1002, 1003],\n", + " \"event_timestamp\": [\n", + " datetime(2021, 4, 12, 10, 59, 42),\n", + " datetime(2021, 4, 12, 8, 12, 10),\n", + " datetime(2021, 4, 12, 16, 40, 26),\n", + " ],\n", + " \"label_driver_reported_satisfaction\": [1, 5, 3],\n", + " \"val_to_add\": [1, 2, 3],\n", + " \"val_to_add_2\": [10, 20, 30],\n", + " }\n", + " )\n", + " if for_batch_scoring:\n", + " entity_df[\"event_timestamp\"] = pd.to_datetime(\"now\", utc=True)\n", + "\n", + " training_df = store.get_historical_features(\n", + " entity_df=entity_df,\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"transformed_conv_rate:conv_rate_plus_val1\",\n", + " \"transformed_conv_rate:conv_rate_plus_val2\",\n", + " ],\n", + " ).to_df()\n", + " print(f\"Successfully fetched {'batch scoring' if for_batch_scoring else 'training'} historical features:\\n\", training_df.head())\n", + "\n", + " except PermissionError:\n", + " print(\"\\n*** PERMISSION 
DENIED *** Cannot fetch historical features.\")\n", + " except Exception as e:\n", + " print(f\"Unexpected error while fetching historical features: {e}\")\n", + "\n", + "def fetch_online_features(store: FeatureStore, source: str = \"\"):\n", + " \"\"\"Fetch online features from the feature store.\"\"\"\n", + " try:\n", + " entity_rows = [\n", + " {\n", + " \"driver_id\": 1001,\n", + " \"val_to_add\": 1000,\n", + " \"val_to_add_2\": 2000,\n", + " },\n", + " {\n", + " \"driver_id\": 1002,\n", + " \"val_to_add\": 1001,\n", + " \"val_to_add_2\": 2002,\n", + " },\n", + " ]\n", + " if source == \"feature_service\":\n", + " features_to_fetch = store.get_feature_service(\"driver_activity_v1\")\n", + " elif source == \"push\":\n", + " features_to_fetch = store.get_feature_service(\"driver_activity_v3\")\n", + " else:\n", + " features_to_fetch = [\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"transformed_conv_rate:conv_rate_plus_val1\",\n", + " \"transformed_conv_rate:conv_rate_plus_val2\",\n", + " ]\n", + "\n", + " returned_features = store.get_online_features(\n", + " features=features_to_fetch,\n", + " entity_rows=entity_rows,\n", + " ).to_dict()\n", + "\n", + " print(f\"Successfully fetched online features {'via feature service' if source else 'directly'}:\\n\")\n", + " for key, value in sorted(returned_features.items()):\n", + " print(f\"{key} : {value}\")\n", + "\n", + " except PermissionError:\n", + " print(\"\\n*** PERMISSION DENIED *** Cannot fetch online features.\")\n", + " except Exception as e:\n", + " print(f\"Unexpected error while fetching online features: {e}\")\n", + "\n", + "def check_permissions():\n", + " \"\"\"Check user role, test various Feast operations,.\"\"\"\n", + "\n", + " feature_views = []\n", + "\n", + " # Step 1: List feature views\n", + " print(\"\\n--- List feature views ---\")\n", + " try:\n", + " feature_views = store.list_feature_views()\n", + " if not feature_views:\n", + " print(\"No feature views found. 
You might not have access or they haven't been created.\")\n", + " has_feature_view_access = False\n", + " else:\n", + " print(f\"Successfully listed {len(feature_views)} feature views:\")\n", + " for fv in feature_views:\n", + " print(f\" - {fv.name}\")\n", + "\n", + " except PermissionError:\n", + " print(\"\\n*** PERMISSION DENIED *** Cannot list feature views.\")\n", + " has_feature_view_access = False\n", + " except Exception as e:\n", + " print(f\"Unexpected error listing feature views: {e}\")\n", + " has_feature_view_access = False\n", + "\n", + " # Step 2: Fetch Historical Features\n", + " print(\"\\n--- Fetching Historical Features for Training ---\")\n", + " fetch_historical_features_entity_df(store, for_batch_scoring=False)\n", + "\n", + " print(\"\\n--- Fetching Historical Features for Batch Scoring ---\")\n", + " fetch_historical_features_entity_df(store, for_batch_scoring=True)\n", + "\n", + " # Step 3: Apply Feature Store\n", + " print(\"\\n--- Write to Feature Store ---\")\n", + " try:\n", + " store.apply(feature_views)\n", + " print(\"User has write access to the feature store.\")\n", + " except PermissionError:\n", + " print(\"\\n*** PERMISSION DENIED *** User lacks permission to modify the feature store.\")\n", + " except Exception as e:\n", + " print(f\"Unexpected error testing write access: {e}\")\n", + "\n", + " # Step 4: Fetch Online Features\n", + " print(\"\\n--- Fetching Online Features ---\")\n", + " fetch_online_features(store)\n", + "\n", + " print(\"\\n--- Fetching Online Features via Feature Service ---\")\n", + " fetch_online_features(store, source=\"feature_service\")\n", + "\n", + " print(\"\\n--- Fetching Online Features via Push Source ---\")\n", + " fetch_online_features(store, source=\"push\")\n", + "\n", + " print(\"\\n--- Performing Push Source ---\")\n", + " # Step 5: Simulate Event Push (Streaming Ingestion)\n", + " try:\n", + " event_df = pd.DataFrame.from_dict(\n", + " {\n", + " \"driver_id\": [1001],\n", + " 
\"event_timestamp\": [datetime.now()],\n", + " \"created\": [datetime.now()],\n", + " \"conv_rate\": [1.0],\n", + " \"acc_rate\": [1.0],\n", + " \"avg_daily_trips\": [1000],\n", + " }\n", + " )\n", + " store.push(\"driver_stats_push_source\", event_df, to=PushMode.ONLINE_AND_OFFLINE)\n", + " print(\"Successfully pushed a test event.\")\n", + " except PermissionError:\n", + " print(\"\\n*** PERMISSION DENIED *** Cannot push event (no write access).\")\n", + " except Exception as e:\n", + " print(f\"Unexpected error while pushing event: {e}\")\n" + ], + "id": "934963c5f6b18930", + "outputs": [], + "execution_count": 51 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "### Test Read-Only Feast User \n", + "**Step 1: Set the Token**" + ], + "id": "84e3f83699b8d83" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:12:44.771268Z", + "start_time": "2025-03-06T20:12:44.691353Z" + } + }, + "cell_type": "code", + "source": "get_k8s_token(\"feast-user-sa\")", + "id": "f1fe8baa02d27d38", + "outputs": [ + { + "data": { + "text/plain": [ + "'Token Retrieved: ***** (hidden for security)'" + ] + }, + "execution_count": 48, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 48 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "**Step 2: Test misc functions from offline, online, materialize_incremental, and others**", + "id": "140c909fa8bcc6ab" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:16:16.680582Z", + "start_time": "2025-03-06T20:16:14.930480Z" + } + }, + "cell_type": "code", + "source": [ + "# Run the permission check function\n", + "check_permissions()\n" + ], + "id": "14b7ad38368db767", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- List feature views ---\n", + "Successfully listed 2 feature views:\n", + " - driver_hourly_stats\n", + " - driver_hourly_stats_fresh\n", + "\n", + "--- Fetching Historical Features 
for Training ---\n", + "Handling connection for 8081\n", + "Successfully fetched training historical features:\n", + " driver_id event_timestamp label_driver_reported_satisfaction \\\n", + "0 1001 2021-04-12 10:59:42+00:00 1 \n", + "1 1002 2021-04-12 08:12:10+00:00 5 \n", + "2 1003 2021-04-12 16:40:26+00:00 3 \n", + "\n", + " val_to_add val_to_add_2 conv_rate acc_rate avg_daily_trips \\\n", + "0 1 10 0.677818 0.453707 193 \n", + "1 2 20 0.328160 0.900565 929 \n", + "2 3 30 0.787191 0.958963 571 \n", + "\n", + " conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 1.677818 10.677818 \n", + "1 2.328160 20.328160 \n", + "2 3.787191 30.787191 \n", + "\n", + "--- Fetching Historical Features for Batch Scoring ---\n", + "Handling connection for 8081\n", + "Successfully fetched batch scoring historical features:\n", + " driver_id event_timestamp \\\n", + "0 1001 2025-03-06 20:16:15.556223+00:00 \n", + "1 1002 2025-03-06 20:16:15.556223+00:00 \n", + "2 1003 2025-03-06 20:16:15.556223+00:00 \n", + "\n", + " label_driver_reported_satisfaction val_to_add val_to_add_2 conv_rate \\\n", + "0 1 1 10 0.782836 \n", + "1 5 2 20 0.731948 \n", + "2 3 3 30 0.613211 \n", + "\n", + " acc_rate avg_daily_trips conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 0.729726 652 1.782836 10.782836 \n", + "1 0.384902 902 2.731948 20.731948 \n", + "2 0.075386 101 3.613211 30.613211 \n", + "\n", + "--- Write to Feature Store ---\n", + "\n", + "*** PERMISSION DENIED *** User lacks permission to modify the feature store.\n", + "\n", + "--- Fetching Online Features ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features directly:\n", + "\n", + "acc_rate : [None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Fetching Online Features via Feature Service ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features via feature service:\n", + "\n", + "conv_rate : 
[None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Fetching Online Features via Push Source ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features via feature service:\n", + "\n", + "acc_rate : [None, None]\n", + "avg_daily_trips : [None, None]\n", + "conv_rate : [None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Performing Push Source ---\n", + "Unexpected error while pushing event: \n" + ] + } + ], + "execution_count": 53 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "### Test Unauthorized Feast User ", + "id": "e5e63a172da6d6d7" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:16:38.487573Z", + "start_time": "2025-03-06T20:16:38.351889Z" + } + }, + "cell_type": "code", + "source": [ + "# Retrieve and store the token\n", + "get_k8s_token(\"feast-unauthorized-user-sa\")" + ], + "id": "a7b3a6578fcf5c3c", + "outputs": [ + { + "data": { + "text/plain": [ + "'Token Retrieved: ***** (hidden for security)'" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 54 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:16:41.522132Z", + "start_time": "2025-03-06T20:16:41.254668Z" + } + }, + "cell_type": "code", + "source": "check_permissions()", + "id": "7aea5658325ab008", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- List feature views ---\n", + "No feature views found. 
You might not have access or they haven't been created.\n", + "\n", + "--- Fetching Historical Features for Training ---\n", + "\n", + "*** PERMISSION DENIED *** Cannot fetch historical features.\n", + "\n", + "--- Fetching Historical Features for Batch Scoring ---\n", + "\n", + "*** PERMISSION DENIED *** Cannot fetch historical features.\n", + "\n", + "--- Write to Feature Store ---\n", + "\n", + "*** PERMISSION DENIED *** User lacks permission to modify the feature store.\n", + "\n", + "--- Fetching Online Features ---\n", + "\n", + "*** PERMISSION DENIED *** Cannot fetch online features.\n", + "\n", + "--- Fetching Online Features via Feature Service ---\n", + "\n", + "*** PERMISSION DENIED *** Cannot fetch online features.\n", + "\n", + "--- Fetching Online Features via Push Source ---\n", + "\n", + "*** PERMISSION DENIED *** Cannot fetch online features.\n", + "\n", + "--- Performing Push Source ---\n", + "Unexpected error while pushing event: Unable to find push source 'driver_stats_push_source'.\n" + ] + } + ], + "execution_count": 55 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "## Test Admin Feast User", + "id": "cb78ced7c37ceb4c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:17:02.206503Z", + "start_time": "2025-03-06T20:17:02.137409Z" + } + }, + "cell_type": "code", + "source": [ + "# Retrieve and store the token\n", + "get_k8s_token(\"feast-admin-sa\")" + ], + "id": "4f10aae116825619", + "outputs": [ + { + "data": { + "text/plain": [ + "'Token Retrieved: ***** (hidden for security)'" + ] + }, + "execution_count": 56, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 56 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-06T20:17:07.799782Z", + "start_time": "2025-03-06T20:17:05.946696Z" + } + }, + "cell_type": "code", + "source": "check_permissions()", + "id": "7a6133f052b9cfe1", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"\n", + "--- List feature views ---\n", + "Successfully listed 2 feature views:\n", + " - driver_hourly_stats\n", + " - driver_hourly_stats_fresh\n", + "\n", + "--- Fetching Historical Features for Training ---\n", + "Handling connection for 8081\n", + "Successfully fetched training historical features:\n", + " driver_id event_timestamp label_driver_reported_satisfaction \\\n", + "0 1001 2021-04-12 10:59:42+00:00 1 \n", + "1 1002 2021-04-12 08:12:10+00:00 5 \n", + "2 1003 2021-04-12 16:40:26+00:00 3 \n", + "\n", + " val_to_add val_to_add_2 conv_rate acc_rate avg_daily_trips \\\n", + "0 1 10 0.677818 0.453707 193 \n", + "1 2 20 0.328160 0.900565 929 \n", + "2 3 30 0.787191 0.958963 571 \n", + "\n", + " conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 1.677818 10.677818 \n", + "1 2.328160 20.328160 \n", + "2 3.787191 30.787191 \n", + "\n", + "--- Fetching Historical Features for Batch Scoring ---\n", + "Handling connection for 8081\n", + "Successfully fetched batch scoring historical features:\n", + " driver_id event_timestamp \\\n", + "0 1001 2025-03-06 20:17:06.566035+00:00 \n", + "1 1002 2025-03-06 20:17:06.566035+00:00 \n", + "2 1003 2025-03-06 20:17:06.566035+00:00 \n", + "\n", + " label_driver_reported_satisfaction val_to_add val_to_add_2 conv_rate \\\n", + "0 1 1 10 0.782836 \n", + "1 5 2 20 0.731948 \n", + "2 3 3 30 0.613211 \n", + "\n", + " acc_rate avg_daily_trips conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 0.729726 652 1.782836 10.782836 \n", + "1 0.384902 902 2.731948 20.731948 \n", + "2 0.075386 101 3.613211 30.613211 \n", + "\n", + "--- Write to Feature Store ---\n", + "User has write access to the feature store.\n", + "\n", + "--- Fetching Online Features ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features directly:\n", + "\n", + "acc_rate : [None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Fetching Online Features 
via Feature Service ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features via feature service:\n", + "\n", + "conv_rate : [None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Fetching Online Features via Push Source ---\n", + "Handling connection for 8082\n", + "Successfully fetched online features via feature service:\n", + "\n", + "acc_rate : [None, None]\n", + "avg_daily_trips : [None, None]\n", + "conv_rate : [None, None]\n", + "conv_rate_plus_val1 : [None, None]\n", + "conv_rate_plus_val2 : [None, None]\n", + "driver_id : [1001, 1002]\n", + "\n", + "--- Performing Push Source ---\n", + "Unexpected error while pushing event: \n" + ] + } + ], + "execution_count": 57 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + " **Note:**\n", + "**Currently, remote materialization not available in Feast when using the Remote Client**\n", + "**Workaround: Consider using running it from pod like**\n", + " \n", + " `kubectl exec deploy/feast-sample-kubernetes-auth -itc online -- bash -c 'feast materialize-incremental $(date -u +\"%Y-%m-%dT%H:%M:%S\")`\n" + ], + "id": "e451c30649630b2f" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Terminate the process", + "id": "e88442b1bae2b327" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-03-05T19:09:29.743583Z", + "start_time": "2025-03-05T19:09:29.734671Z" + } + }, + "cell_type": "code", + "source": [ + "for name, process in port_forward_processes.items():\n", + " process.terminate()\n", + " print(f\"Stopped port forwarding for {name}\")" + ], + "id": "2984d62766da122a", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopped port forwarding for offline_store\n", + "Stopped port forwarding for online_store\n", + "Stopped port forwarding for registry\n" + ] + } + ], + "execution_count": 25 + }, + { + "metadata": {}, + 
"cell_type": "markdown", + "source": "[Next: Uninstall the Operator and all Feast objects](./03-uninstall.ipynb)", + "id": "38c54e92643e0bda" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/operator-rbac/README.md b/examples/operator-rbac/README.md new file mode 100644 index 00000000000..9c0a0461678 --- /dev/null +++ b/examples/operator-rbac/README.md @@ -0,0 +1,6 @@ +# Running the Feast RBAC example on Kubernetes using the Feast Operator. + +1. [1-setup-operator-rbac.ipynb](1-setup-operator-rbac.ipynb) will guide you through how to setup Role-Based Access Control (RBAC) for Feast using the [Feast Operator](../../infra/feast-operator/) and Kubernetes Authentication. This Feast Admin Step requires you to setup the operator and Feast RBAC on K8s. +2. [2-client.ipynb](2-client.ipynb) Validate the RBAC with the client example using different test cases using a service account token locally. +3. [03-uninstall.ipynb](03-uninstall.ipynb) Clear the installed deployments and K8s Objects. 
+ diff --git a/examples/operator-rbac/client/feature_store.yaml b/examples/operator-rbac/client/feature_store.yaml new file mode 100644 index 00000000000..49a4c426363 --- /dev/null +++ b/examples/operator-rbac/client/feature_store.yaml @@ -0,0 +1,15 @@ +project: feast_rbac +provider: local +offline_store: + host: localhost + type: remote + port: 8081 +online_store: + path: http://localhost:8082 + type: remote +registry: + path: localhost:8083 + registry_type: remote +auth: + type: kubernetes +entity_key_serialization_version: 3 diff --git a/examples/operator-rbac/permissions_apply.py b/examples/operator-rbac/permissions_apply.py new file mode 100644 index 00000000000..0d46ad5260a --- /dev/null +++ b/examples/operator-rbac/permissions_apply.py @@ -0,0 +1,27 @@ +# Necessary modules for permissions and policies in Feast for RBAC +from feast.feast_object import ALL_RESOURCE_TYPES +from feast.permissions.action import READ, AuthzedAction, ALL_ACTIONS +from feast.permissions.permission import Permission +from feast.permissions.policy import RoleBasedPolicy + +# Define K8s roles same as created with FeatureStore CR +admin_roles = ["feast-writer"] # Full access (can create, update, delete ) Feast Resources +user_roles = ["feast-reader"] # Read-only access on Feast Resources + +# User permissions (feast_user_permission) +# - Grants read and describing Feast objects access +user_perm = Permission( + name="feast_user_permission", + types=ALL_RESOURCE_TYPES, + policy=RoleBasedPolicy(roles=user_roles), + actions=[AuthzedAction.DESCRIBE] + READ # Read access (READ_ONLINE, READ_OFFLINE) + describe other Feast Resources. 
+) + +# Admin permissions (feast_admin_permission) +# - Grants full control over all resources +admin_perm = Permission( + name="feast_admin_permission", + types=ALL_RESOURCE_TYPES, + policy=RoleBasedPolicy(roles=admin_roles), + actions=ALL_ACTIONS # Full permissions: CREATE, UPDATE, DELETE, READ, WRITE +) diff --git a/examples/python-helm-demo/README.md b/examples/python-helm-demo/README.md index 90469e746d4..078550ae392 100644 --- a/examples/python-helm-demo/README.md +++ b/examples/python-helm-demo/README.md @@ -3,87 +3,168 @@ For this tutorial, we set up Feast with Redis. -We use the Feast CLI to register and materialize features, and then retrieving via a Feast Python feature server deployed in Kubernetes +We use the Feast CLI to register and materialize features from the current machine, and then retrieving via a +Feast Python feature server deployed in Kubernetes ## First, let's set up a Redis cluster 1. Start minikube (`minikube start`) -2. Use helm to install a default Redis cluster +1. Use helm to install a default Redis cluster ```bash helm repo add bitnami https://charts.bitnami.com/bitnami helm repo update helm install my-redis bitnami/redis ``` ![](redis-screenshot.png) -3. Port forward Redis so we can materialize features to it +1. Port forward Redis so we can materialize features to it ```bash kubectl port-forward --namespace default svc/my-redis-master 6379:6379 ``` -4. Get your Redis password using the command (pasted below for convenience). We'll need this to tell Feast how to communicate with the cluster. +1. Get your Redis password using the command (pasted below for convenience). We'll need this to tell Feast how to communicate with the cluster. 
```bash export REDIS_PASSWORD=$(kubectl get secret --namespace default my-redis -o jsonpath="{.data.redis-password}" | base64 --decode) echo $REDIS_PASSWORD ``` +## Then, let's set up a MinIO S3 store +Manifests have been taken from [Deploy Minio in your project](https://ai-on-openshift.io/tools-and-applications/minio/minio/#deploy-minio-in-your-project). + +1. Deploy MinIO instance: + ``` + kubectl apply -f minio-dev.yaml + ``` + +1. Forward the UI port: + ```console + kubectl port-forward svc/minio-service 9090:9090 + ``` +1. Login to (localhost:9090)[http://localhost:9090] as `minio`/`minio123` and create bucket called `feast-demo`. +1. Stop previous port forwarding and forward the API port instead: + ```console + kubectl port-forward svc/minio-service 9000:9000 + ``` + ## Next, we setup a local Feast repo -1. Install Feast with Redis dependencies `pip install "feast[redis]"` -2. Make a bucket in GCS (or S3) -3. The feature repo is already setup here, so you just need to swap in your GCS bucket and Redis credentials. - We need to modify the `feature_store.yaml`, which has two fields for you to replace: +1. Install Feast with Redis dependencies `pip install "feast[redis,aws]"` +1. The feature repo is already setup here, so you just need to swap in your Redis credentials. 
+ We need to modify the `feature_store.yaml`, which has one field for you to replace: + ```console + sed "s/_REDIS_PASSWORD_/${REDIS_PASSWORD}/" feature_repo/feature_store.yaml.template > feature_repo/feature_store.yaml + cat feature_repo/feature_store.yaml + ``` + + Example repo: ```yaml - registry: gs://[YOUR GCS BUCKET]/demo-repo/registry.db + registry: s3://localhost:9000/feast-demo/registry.db project: feast_python_demo - provider: gcp + provider: local online_store: type: redis - # Note: this would normally be using instance URL's to access Redis - connection_string: localhost:6379,password=[YOUR PASSWORD] + connection_string: localhost:6379,password=**** offline_store: type: file entity_key_serialization_version: 2 ``` -4. Run `feast apply` from within the `feature_repo` directory to apply your local features to the remote registry - - Note: you may need to authenticate to gcloud first with `gcloud auth login` -5. Materialize features to the online store: +1. To run `feast apply` from the current machine we need to define the AWS credentials to connect the MinIO S3 store, which +are defined in [minio.env](./minio.env): + ```console + source minio.env + cd feature_repo + feast apply + ``` +1. Let's validate the setup by running some queries + ```console + feast entities list + feast feature-views list + ``` +1. Materialize features to the online store: ```bash + cd feature_repo CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S") feast materialize-incremental $CURRENT_TIME ``` ## Now let's setup the Feast Server -1. Add the gcp-auth addon to mount GCP credentials: - ```bash - minikube addons enable gcp-auth - ``` -2. Add Feast's Python/Go feature server chart repo +1. Add Feast's Python feature server chart repo ```bash helm repo add feast-charts https://feast-helm-charts.storage.googleapis.com helm repo update ``` -3. 
For this tutorial, because we don't have a direct hosted endpoint into Redis, we need to change `feature_store.yaml` to talk to the Kubernetes Redis service - ```bash - sed -i '' 's/localhost:6379/my-redis-master:6379/g' feature_store.yaml - ``` -4. Install the Feast helm chart: `helm install feast-release feast-charts/feast-feature-server --set feature_store_yaml_base64=$(base64 feature_store.yaml)` - > **Dev instructions**: if you're changing the java logic or chart, you can do - 1. `eval $(minikube docker-env)` - 2. `make build-feature-server-dev` - 3. `helm install feast-release ../../../infra/charts/feast-feature-server --set image.tag=dev --set feature_store_yaml_base64=$(base64 feature_store.yaml)` -5. (Optional): check logs of the server to make sure it’s working +1. For this tutorial, we'll use a predefined configuration where we just needs to inject the Redis service password: + ```console + sed "s/_REDIS_PASSWORD_/$REDIS_PASSWORD/" online_feature_store.yaml.template > online_feature_store.yaml + cat online_feature_store.yaml + ``` + As you see, the connection points to `my-redis-master:6379` instead of `localhost:6379`. + +1. Install the Feast helm chart: + ```console + helm upgrade --install feast-online feast-charts/feast-feature-server \ + --set fullnameOverride=online-server --set feast_mode=online \ + --set feature_store_yaml_base64=$(base64 -i 'online_feature_store.yaml') + ``` +1. 
Patch the deployment to include MinIO settings: + ```console + kubectl patch deployment online-server --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/env/-", + "value": { + "name": "AWS_ACCESS_KEY_ID", + "value": "minio" + } + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/env/-", + "value": { + "name": "AWS_SECRET_ACCESS_KEY", + "value": "minio123" + } + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/env/-", + "value": { + "name": "AWS_DEFAULT_REGION", + "value": "default" + } + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/env/-", + "value": { + "name": "FEAST_S3_ENDPOINT_URL", + "value": "http://minio-service:9000" + } + } + ]' + kubectl wait --for=condition=available deployment/online-server --timeout=2m + ``` +1. (Optional): check logs of the server to make sure it’s working ```bash - kubectl logs svc/feast-release-feast-feature-server + kubectl logs svc/online-server ``` -6. Port forward to expose the grpc endpoint: +1. Port forward to expose the grpc endpoint: ```bash - kubectl port-forward svc/feast-release-feast-feature-server 6566:80 + kubectl port-forward svc/online-server 6566:80 ``` -7. Run test fetches for online features:8. - - First: change back the Redis connection string to allow localhost connections to Redis +1. Run test fetches for online features:8. ```bash - sed -i '' 's/my-redis-master:6379/localhost:6379/g' feature_store.yaml + source minio.env + cd test + python test_python_fetch.py ``` - - Then run the included fetch script, which fetches both via the HTTP endpoint and for comparison, via the Python SDK - ```bash - python test_python_fetch.py + + Output example: + ```console + --- Online features with SDK --- + WARNING:root:_list_feature_views will make breaking changes. Please use _list_batch_feature_views instead. _list_feature_views will behave like _list_all_feature_views in the future. 
+ conv_rate : [0.6799587607383728, 0.9761165976524353] + driver_id : [1001, 1002] + + --- Online features with HTTP endpoint --- + conv_rate : [0.67995876 0.9761166 ] + driver_id : [1001 1002] ``` \ No newline at end of file diff --git a/examples/python-helm-demo/feature_repo/data/driver_stats_with_string.parquet b/examples/python-helm-demo/feature_repo/data/driver_stats_with_string.parquet index 83b8c31aa51..ae8f17e45d3 100644 Binary files a/examples/python-helm-demo/feature_repo/data/driver_stats_with_string.parquet and b/examples/python-helm-demo/feature_repo/data/driver_stats_with_string.parquet differ diff --git a/examples/python-helm-demo/feature_repo/feature_store.yaml b/examples/python-helm-demo/feature_repo/feature_store.yaml deleted file mode 100644 index d49c0cbd0eb..00000000000 --- a/examples/python-helm-demo/feature_repo/feature_store.yaml +++ /dev/null @@ -1,10 +0,0 @@ -registry: gs://[YOUR GCS BUCKET]/demo-repo/registry.db -project: feast_python_demo -provider: gcp -online_store: - type: redis - # Note: this would normally be using instance URL's to access Redis - connection_string: localhost:6379,password=[YOUR PASSWORD] -offline_store: - type: file -entity_key_serialization_version: 2 \ No newline at end of file diff --git a/examples/python-helm-demo/feature_repo/feature_store.yaml.template b/examples/python-helm-demo/feature_repo/feature_store.yaml.template new file mode 100644 index 00000000000..585ba23e63b --- /dev/null +++ b/examples/python-helm-demo/feature_repo/feature_store.yaml.template @@ -0,0 +1,9 @@ +registry: s3://feast-demo/registry.db +project: feast_python_demo +provider: local +online_store: + type: redis + connection_string: localhost:6379,password=_REDIS_PASSWORD_ +offline_store: + type: file +entity_key_serialization_version: 2 \ No newline at end of file diff --git a/examples/python-helm-demo/minio-dev.yaml b/examples/python-helm-demo/minio-dev.yaml new file mode 100644 index 00000000000..9285cbca983 --- /dev/null +++ 
b/examples/python-helm-demo/minio-dev.yaml @@ -0,0 +1,128 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: minio-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + volumeMode: Filesystem +--- +kind: Secret +apiVersion: v1 +metadata: + name: minio-secret +stringData: + # change the username and password to your own values. + # ensure that the user is at least 3 characters long and the password at least 8 + minio_root_user: minio + minio_root_password: minio123 +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: minio +spec: + replicas: 1 + selector: + matchLabels: + app: minio + template: + metadata: + labels: + app: minio + spec: + volumes: + - name: data + persistentVolumeClaim: + claimName: minio-pvc + containers: + - resources: + limits: + cpu: 250m + memory: 1Gi + requests: + cpu: 20m + memory: 100Mi + readinessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: /dev/termination-log + name: minio + livenessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: minio-secret + key: minio_root_user + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: minio-secret + key: minio_root_password + ports: + - containerPort: 9000 + protocol: TCP + - containerPort: 9090 + protocol: TCP + imagePullPolicy: IfNotPresent + volumeMounts: + - name: data + mountPath: /data + subPath: minio + terminationMessagePolicy: File + image: >- + quay.io/minio/minio:RELEASE.2023-06-19T19-52-50Z + args: + - server + - /data + - --console-address + - :9090 + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + strategy: + type: Recreate + 
revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 +--- +kind: Service +apiVersion: v1 +metadata: + name: minio-service +spec: + ipFamilies: + - IPv4 + ports: + - name: api + protocol: TCP + port: 9000 + targetPort: 9000 + - name: ui + protocol: TCP + port: 9090 + targetPort: 9090 + internalTrafficPolicy: Cluster + type: ClusterIP + ipFamilyPolicy: SingleStack + sessionAffinity: None + selector: + app: minio \ No newline at end of file diff --git a/examples/python-helm-demo/minio.env b/examples/python-helm-demo/minio.env new file mode 100644 index 00000000000..b19ec5083f5 --- /dev/null +++ b/examples/python-helm-demo/minio.env @@ -0,0 +1,7 @@ +export AWS_ACCESS_KEY_ID=minio +export AWS_DEFAULT_REGION=default +#export AWS_S3_BUCKET=feast-demo +#export AWS_S3_ENDPOINT=http://localhost:9000 +export FEAST_S3_ENDPOINT_URL=http://localhost:9000 +export AWS_SECRET_ACCESS_KEY=minio123 + diff --git a/examples/python-helm-demo/online_feature_store.yaml.template b/examples/python-helm-demo/online_feature_store.yaml.template new file mode 100644 index 00000000000..7acb9582c51 --- /dev/null +++ b/examples/python-helm-demo/online_feature_store.yaml.template @@ -0,0 +1,7 @@ +project: feast_python_demo +provider: local +registry: s3://feast-demo/registry.db +online_store: + type: redis + connection_string: my-redis-master:6379,password=_REDIS_PASSWORD_ +entity_key_serialization_version: 2 \ No newline at end of file diff --git a/examples/python-helm-demo/test/feature_store.yaml b/examples/python-helm-demo/test/feature_store.yaml new file mode 100644 index 00000000000..13e99873ee7 --- /dev/null +++ b/examples/python-helm-demo/test/feature_store.yaml @@ -0,0 +1,7 @@ +registry: s3://feast-demo/registry.db +project: feast_python_demo +provider: local +online_store: + path: http://localhost:6566 + type: remote +entity_key_serialization_version: 2 \ No newline at end of file diff --git a/examples/python-helm-demo/feature_repo/test_python_fetch.py 
b/examples/python-helm-demo/test/test_python_fetch.py similarity index 73% rename from examples/python-helm-demo/feature_repo/test_python_fetch.py rename to examples/python-helm-demo/test/test_python_fetch.py index f9c7c62f4fd..715912422f3 100644 --- a/examples/python-helm-demo/feature_repo/test_python_fetch.py +++ b/examples/python-helm-demo/test/test_python_fetch.py @@ -1,6 +1,7 @@ from feast import FeatureStore import requests import json +import pandas as pd def run_demo_http(): @@ -14,7 +15,14 @@ def run_demo_http(): r = requests.post( "http://localhost:6566/get-online-features", data=json.dumps(online_request) ) - print(json.dumps(r.json(), indent=4, sort_keys=True)) + + resp_data = json.loads(r.text) + records = pd.DataFrame.from_records( + columns=resp_data["metadata"]["feature_names"], + data=[[r["values"][i] for r in resp_data["results"]] for i in range(len(resp_data["results"]))] + ) + for col in sorted(records.columns): + print(col, " : ", records[col].values) def run_demo_sdk(): diff --git a/examples/quickstart/quickstart.ipynb b/examples/quickstart/quickstart.ipynb index 9082baa48dd..5604cc25540 100644 --- a/examples/quickstart/quickstart.ipynb +++ b/examples/quickstart/quickstart.ipynb @@ -1,1103 +1,1102 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "p5JTeKfCVBZf" - }, - "source": [ - "# Overview\n", - "\n", - "In this tutorial, we'll use Feast to generate training data and power online model inference for a \n", - "ride-sharing driver satisfaction prediction model. Feast solves several common issues in this flow:\n", - "\n", - "1. **Training-serving skew and complex data joins:** Feature values often exist across multiple tables. Joining \n", - " these datasets can be complicated, slow, and error-prone.\n", - " * Feast joins these tables with battle-tested logic that ensures _point-in-time_ correctness so future feature \n", - " values do not leak to models.\n", - "2. 
**Online feature availability:** At inference time, models often need access to features that aren't readily \n", - " available and need to be precomputed from other data sources.\n", - " * Feast manages deployment to a variety of online stores (e.g. DynamoDB, Redis, Google Cloud Datastore) and \n", - " ensures necessary features are consistently _available_ and _freshly computed_ at inference time.\n", - "3. **Feature and model versioning:** Different teams within an organization are often unable to reuse \n", - " features across projects, resulting in duplicate feature creation logic. Models have data dependencies that need \n", - " to be versioned, for example when running A/B tests on model versions.\n", - " * Feast enables discovery of and collaboration on previously used features and enables versioning of sets of \n", - " features (via _feature services_).\n", - " * _(Experimental)_ Feast enables light-weight feature transformations so users can re-use transformation logic \n", - " across online / offline use cases and across models.\n", - "\n", - "We will:\n", - "1. Deploy a local feature store with a **Parquet file offline store** and **Sqlite online store**.\n", - "2. Build a training dataset using our time series features from our **Parquet files**.\n", - "3. Materialize feature values from the offline store into the online store.\n", - "4. Read the latest features from the online store for inference." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9_Y997DzvOMI" - }, - "source": [ - "## Step 1: Install Feast\n", - "\n", - "Install Feast (and Pygments for pretty printing) using pip:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rXNMAAJKQPG5" - }, - "outputs": [], - "source": [ - "%%sh\n", - "pip install feast -U -q\n", - "pip install Pygments -q\n", - "echo \"Please restart your runtime now (Runtime -> Restart runtime). 
This ensures that the correct dependencies are loaded.\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": false, - "id": "sOX_LwjaAhKz" - }, - "source": [ - "**Reminder**: Please restart your runtime after installing Feast (Runtime -> Restart runtime). This ensures that the correct dependencies are loaded.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OZetvs5xx4GP" - }, - "source": [ - "## Step 2: Create a feature repository\n", - "\n", - "A feature repository is a directory that contains the configuration of the feature store and individual features. This configuration is written as code (Python/YAML) and it's highly recommended that teams track it centrally using git. See [Feature Repository](https://docs.feast.dev/reference/feature-repository) for a detailed explanation of feature repositories.\n", - "\n", - "The easiest way to create a new feature repository to use the `feast init` command. This creates a scaffolding with initial demo data.\n", - "\n", - "### Demo data scenario \n", - "- We have surveyed some drivers for how satisfied they are with their experience in a ride-sharing app. \n", - "- We want to generate predictions for driver satisfaction for the rest of the users so we can reach out to potentially dissatisfied users." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "IhirSkgUvYau", - "outputId": "664367b9-6a2a-493d-fd78-6495fb459fa2" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Creating a new Feast repository in \u001b[1m\u001b[32m/content/feature_repo\u001b[0m.\n", - "\n" - ] - } - ], - "source": [ - "!feast init feature_repo" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OdTASZPvyKCe" - }, - "source": [ - "### Step 2a: Inspecting the feature repository\n", - "\n", - "Let's take a look at the demo repo itself. 
It breaks down into\n", - "\n", - "\n", - "* `data/` contains raw demo parquet data\n", - "* `example_repo.py` contains demo feature definitions\n", - "* `feature_store.yaml` contains a demo setup configuring where data sources are\n", - "* `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features.\n", - " * You can run this with `python test_workflow.py`.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "9jXuzt4ovzA3", - "outputId": "9e326892-f0cc-4d86-d0b2-f33f822f83a9" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/content/feature_repo\n", - "README.md feature_store.yaml\n", - "__init__.py example_repo.py test_workflow.py\n", - "\n", - "./data:\n", - "driver_stats.parquet\n" - ] - } - ], - "source": [ - "%cd feature_repo/feature_repo\n", - "!ls -R" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MJk_WNsbeUP6" - }, - "source": [ - "### Step 2b: Inspecting the project configuration\n", - "Let's inspect the setup of the project in `feature_store.yaml`. \n", - "\n", - "The key line defining the overall architecture of the feature store is the **provider**. \n", - "\n", - "The provider value sets default offline and online stores. \n", - "* The offline store provides the compute layer to process historical data (for generating training data & feature \n", - " values for serving). 
\n", - "* The online store is a low latency store of the latest feature values (for powering real-time inference).\n", - "\n", - "Valid values for `provider` in `feature_store.yaml` are:\n", - "\n", - "* local: use file source with SQLite/Redis\n", - "* gcp: use BigQuery/Snowflake with Google Cloud Datastore/Redis\n", - "* aws: use Redshift/Snowflake with DynamoDB/Redis\n", - "\n", - "Note that there are many other offline / online stores Feast works with, including Azure, Hive, Trino, and PostgreSQL via community plugins. See https://docs.feast.dev/roadmap for all supported connectors.\n", - "\n", - "A custom setup can also be made by following [Customizing Feast](https://docs.feast.dev/v/master/how-to-guides/customizing-feast)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "9_YJ--uYdtcP", - "outputId": "af56a8da-9ca2-4dd9-f73c-a60dd3e1613a" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[94mproject\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mfeature_repo\u001b[37m\u001b[39;49;00m\n", - "\u001b[37m# By default, the registry is a file (but can be turned into a more scalable SQL-backed registry)\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", - "\u001b[94mregistry\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/registry.db\u001b[37m\u001b[39;49;00m\n", - "\u001b[37m# The provider primarily specifies default offline / online stores & storing the registry in a given cloud\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", - "\u001b[94mprovider\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mlocal\u001b[37m\u001b[39;49;00m\n", - "\u001b[94monline_store\u001b[39;49;00m:\u001b[37m\u001b[39;49;00m\n", - "\u001b[37m \u001b[39;49;00m\u001b[94mpath\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/online_store.db\u001b[37m\u001b[39;49;00m\n", - "\u001b[94mentity_key_serialization_version\u001b[39;49;00m:\u001b[37m 
\u001b[39;49;00m2\u001b[37m\u001b[39;49;00m\n" - ] - } - ], - "source": [ - "!pygmentize feature_store.yaml" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FnMlk4zshywp" - }, - "source": [ - "### Inspecting the raw data\n", - "\n", - "The raw feature data we have in this demo is stored in a local parquet file. The dataset captures hourly stats of a driver in a ride-sharing app." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 423 - }, - "id": "sIF2lO59dwzi", - "outputId": "8931930b-b32f-43e1-d45b-de230489c7b8" - }, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
event_timestampdriver_idconv_rateacc_rateavg_daily_tripscreated
02022-07-24 14:00:00+00:0010050.4239130.0828312012022-08-08 14:14:11.200
12022-07-24 15:00:00+00:0010050.5071260.4274706902022-08-08 14:14:11.200
22022-07-24 16:00:00+00:0010050.1398100.1297438452022-08-08 14:14:11.200
32022-07-24 17:00:00+00:0010050.3835740.0717288392022-08-08 14:14:11.200
42022-07-24 18:00:00+00:0010050.9591310.44005122022-08-08 14:14:11.200
.....................
18022022-08-08 12:00:00+00:0010010.9948830.0201456502022-08-08 14:14:11.200
18032022-08-08 13:00:00+00:0010010.6638440.8646393592022-08-08 14:14:11.200
18042021-04-12 07:00:00+00:0010010.0686960.6249776242022-08-08 14:14:11.200
18052022-08-01 02:00:00+00:0010030.9808690.2444207902022-08-08 14:14:11.200
18062022-08-01 02:00:00+00:0010030.9808690.2444207902022-08-08 14:14:11.200
\n", - "

1807 rows × 6 columns

\n", - "
" - ], - "text/plain": [ - " event_timestamp driver_id conv_rate acc_rate \\\n", - "0 2022-07-24 14:00:00+00:00 1005 0.423913 0.082831 \n", - "1 2022-07-24 15:00:00+00:00 1005 0.507126 0.427470 \n", - "2 2022-07-24 16:00:00+00:00 1005 0.139810 0.129743 \n", - "3 2022-07-24 17:00:00+00:00 1005 0.383574 0.071728 \n", - "4 2022-07-24 18:00:00+00:00 1005 0.959131 0.440051 \n", - "... ... ... ... ... \n", - "1802 2022-08-08 12:00:00+00:00 1001 0.994883 0.020145 \n", - "1803 2022-08-08 13:00:00+00:00 1001 0.663844 0.864639 \n", - "1804 2021-04-12 07:00:00+00:00 1001 0.068696 0.624977 \n", - "1805 2022-08-01 02:00:00+00:00 1003 0.980869 0.244420 \n", - "1806 2022-08-01 02:00:00+00:00 1003 0.980869 0.244420 \n", - "\n", - " avg_daily_trips created \n", - "0 201 2022-08-08 14:14:11.200 \n", - "1 690 2022-08-08 14:14:11.200 \n", - "2 845 2022-08-08 14:14:11.200 \n", - "3 839 2022-08-08 14:14:11.200 \n", - "4 2 2022-08-08 14:14:11.200 \n", - "... ... ... \n", - "1802 650 2022-08-08 14:14:11.200 \n", - "1803 359 2022-08-08 14:14:11.200 \n", - "1804 624 2022-08-08 14:14:11.200 \n", - "1805 790 2022-08-08 14:14:11.200 \n", - "1806 790 2022-08-08 14:14:11.200 \n", - "\n", - "[1807 rows x 6 columns]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import pandas as pd\n", - "\n", - "pd.read_parquet(\"data/driver_stats.parquet\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rRL8-ubWzUFy" - }, - "source": [ - "## Step 3: Register feature definitions and deploy your feature store\n", - "\n", - "`feast apply` scans python files in the current directory for feature/entity definitions and deploys infrastructure according to `feature_store.yaml`.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5NS4INL5n7ze" - }, - "source": [ - "### Step 3a: Inspecting feature definitions\n", - "Let's inspect what `example_repo.py` looks like:\n", - "\n", - "```python\n", - "# This is an example 
feature definition file\n", - "\n", - "from datetime import timedelta\n", - "\n", - "import pandas as pd\n", - "\n", - "from feast import Entity, FeatureService, FeatureView, Field, FileSource, RequestSource, PushSource\n", - "from feast.on_demand_feature_view import on_demand_feature_view\n", - "from feast.types import Float32, Int64, Float64\n", - "\n", - "# Read data from parquet files. Parquet is convenient for local development mode. For\n", - "# production, you can use your favorite DWH, such as BigQuery. See Feast documentation\n", - "# for more info.\n", - "driver_hourly_stats = FileSource(\n", - " name=\"driver_hourly_stats_source\",\n", - " path=\"/content/feature_repo/data/driver_stats.parquet\",\n", - " timestamp_field=\"event_timestamp\",\n", - " created_timestamp_column=\"created\",\n", - ")\n", - "\n", - "# Define an entity for the driver. You can think of entity as a primary key used to\n", - "# fetch features.\n", - "driver = Entity(name=\"driver\", join_keys=[\"driver_id\"])\n", - "\n", - "# Our parquet files contain sample data that includes a driver_id column, timestamps and\n", - "# three feature column. 
Here we define a Feature View that will allow us to serve this\n", - "# data to our model online.\n", - "driver_hourly_stats_view = FeatureView(\n", - " name=\"driver_hourly_stats\",\n", - " entities=[driver],\n", - " ttl=timedelta(days=1),\n", - " schema=[\n", - " Field(name=\"conv_rate\", dtype=Float32),\n", - " Field(name=\"acc_rate\", dtype=Float32),\n", - " Field(name=\"avg_daily_trips\", dtype=Int64),\n", - " ],\n", - " online=True,\n", - " source=driver_hourly_stats,\n", - " tags={},\n", - ")\n", - "\n", - "# Defines a way to push data (to be available offline, online or both) into Feast.\n", - "driver_stats_push_source = PushSource(\n", - " name=\"driver_stats_push_source\",\n", - " batch_source=driver_hourly_stats,\n", - ")\n", - "\n", - "# Define a request data source which encodes features / information only\n", - "# available at request time (e.g. part of the user initiated HTTP request)\n", - "input_request = RequestSource(\n", - " name=\"vals_to_add\",\n", - " schema=[\n", - " Field(name=\"val_to_add\", dtype=Int64),\n", - " Field(name=\"val_to_add_2\", dtype=Int64),\n", - " ],\n", - ")\n", - "\n", - "\n", - "# Define an on demand feature view which can generate new features based on\n", - "# existing feature views and RequestSource features\n", - "@on_demand_feature_view(\n", - " sources=[driver_hourly_stats_view, input_request],\n", - " schema=[\n", - " Field(name=\"conv_rate_plus_val1\", dtype=Float64),\n", - " Field(name=\"conv_rate_plus_val2\", dtype=Float64),\n", - " ],\n", - ")\n", - "def transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame:\n", - " df = pd.DataFrame()\n", - " df[\"conv_rate_plus_val1\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add\"]\n", - " df[\"conv_rate_plus_val2\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add_2\"]\n", - " return df\n", - "\n", - "\n", - "# This groups features into a model version\n", - "driver_stats_fs = FeatureService(\n", - " name=\"driver_activity_v1\", features=[driver_hourly_stats_view, 
transformed_conv_rate]\n", - ")\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "im_cc5HdoDno" - }, - "source": [ - "### Step 3b: Applying feature definitions\n", - "Now we run `feast apply` to register the feature views and entities defined in `example_repo.py`, and sets up SQLite online store tables. Note that we had previously specified SQLite as the online store in `feature_store.yaml` by specifying a `local` provider." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "RYKCKKrcxYZG", - "outputId": "f34aa509-1dc6-4e50-e8ee-12897138f3b9" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "RuntimeWarning: On demand feature view is an experimental feature. This API is stable, but the functionality does not scale well for offline retrieval\n", - " warnings.warn(\n", - "Created entity \u001b[1m\u001b[32mdriver\u001b[0m\n", - "Created feature view \u001b[1m\u001b[32mdriver_hourly_stats\u001b[0m\n", - "Created on demand feature view \u001b[1m\u001b[32mtransformed_conv_rate\u001b[0m\n", - "Created feature service \u001b[1m\u001b[32mdriver_activity_v1\u001b[0m\n", - "\n", - "Created sqlite table \u001b[1m\u001b[32mfeature_repo_driver_hourly_stats\u001b[0m\n", - "\n" - ] - } - ], - "source": [ - "!feast apply" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uV7rtRQgzyf0" - }, - "source": [ - "## Step 4: Generating training data or powering batch scoring models\n", - "\n", - "To train a model, we need features and labels. Often, this label data is stored separately (e.g. you have one table storing user survey results and another set of tables with feature values). Feast can help generate the features that map to these labels.\n", - "\n", - "Feast needs a list of **entities** (e.g. driver ids) and **timestamps**. 
Feast will intelligently join relevant \n", - "tables to create the relevant feature vectors. There are two ways to generate this list:\n", - "1. The user can query that table of labels with timestamps and pass that into Feast as an _entity dataframe_ for \n", - "training data generation. \n", - "2. The user can also query that table with a *SQL query* which pulls entities. See the documentation on [feature retrieval](https://docs.feast.dev/getting-started/concepts/feature-retrieval) for details \n", - "\n", - "* Note that we include timestamps because we want the features for the same driver at various timestamps to be used in a model.\n", - "\n", - "### Step 4a: Generating training data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "C6Fzia7YwBzz", - "outputId": "58c4c3dd-7a10-4f56-901d-1bb879ebbcb8" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "----- Feature schema -----\n", - "\n", - "\n", - "RangeIndex: 3 entries, 0 to 2\n", - "Data columns (total 10 columns):\n", - " # Column Non-Null Count Dtype \n", - "--- ------ -------------- ----- \n", - " 0 driver_id 3 non-null int64 \n", - " 1 event_timestamp 3 non-null datetime64[ns, UTC]\n", - " 2 label_driver_reported_satisfaction 3 non-null int64 \n", - " 3 val_to_add 3 non-null int64 \n", - " 4 val_to_add_2 3 non-null int64 \n", - " 5 conv_rate 3 non-null float32 \n", - " 6 acc_rate 3 non-null float32 \n", - " 7 avg_daily_trips 3 non-null int32 \n", - " 8 conv_rate_plus_val1 3 non-null float64 \n", - " 9 conv_rate_plus_val2 3 non-null float64 \n", - "dtypes: datetime64[ns, UTC](1), float32(2), float64(2), int32(1), int64(4)\n", - "memory usage: 332.0 bytes\n", - "None\n", - "\n", - "----- Example features -----\n", - "\n", - " driver_id event_timestamp label_driver_reported_satisfaction \\\n", - "0 1001 2021-04-12 10:59:42+00:00 1 \n", - "1 1002 2021-04-12 08:12:10+00:00 5 \n", 
- "2 1003 2021-04-12 16:40:26+00:00 3 \n", - "\n", - " val_to_add val_to_add_2 conv_rate acc_rate avg_daily_trips \\\n", - "0 1 10 0.356766 0.051319 93 \n", - "1 2 20 0.130452 0.359439 522 \n", - "2 3 30 0.666570 0.343380 266 \n", - "\n", - " conv_rate_plus_val1 conv_rate_plus_val2 \n", - "0 1.356766 10.356766 \n", - "1 2.130452 20.130452 \n", - "2 3.666570 30.666570 \n" - ] - } - ], - "source": [ - "from datetime import datetime\n", - "import pandas as pd\n", - "\n", - "from feast import FeatureStore\n", - "\n", - "# The entity dataframe is the dataframe we want to enrich with feature values\n", - "# Note: see https://docs.feast.dev/getting-started/concepts/feature-retrieval for more details on how to retrieve\n", - "# for all entities in the offline store instead\n", - "entity_df = pd.DataFrame.from_dict(\n", - " {\n", - " # entity's join key -> entity values\n", - " \"driver_id\": [1001, 1002, 1003],\n", - " # \"event_timestamp\" (reserved key) -> timestamps\n", - " \"event_timestamp\": [\n", - " datetime(2021, 4, 12, 10, 59, 42),\n", - " datetime(2021, 4, 12, 8, 12, 10),\n", - " datetime(2021, 4, 12, 16, 40, 26),\n", - " ],\n", - " # (optional) label name -> label values. 
Feast does not process these\n", - " \"label_driver_reported_satisfaction\": [1, 5, 3],\n", - " # values we're using for an on-demand transformation\n", - " \"val_to_add\": [1, 2, 3],\n", - " \"val_to_add_2\": [10, 20, 30],\n", - " }\n", - ")\n", - "\n", - "store = FeatureStore(repo_path=\".\")\n", - "\n", - "training_df = store.get_historical_features(\n", - " entity_df=entity_df,\n", - " features=[\n", - " \"driver_hourly_stats:conv_rate\",\n", - " \"driver_hourly_stats:acc_rate\",\n", - " \"driver_hourly_stats:avg_daily_trips\",\n", - " \"transformed_conv_rate:conv_rate_plus_val1\",\n", - " \"transformed_conv_rate:conv_rate_plus_val2\",\n", - " ],\n", - ").to_df()\n", - "\n", - "print(\"----- Feature schema -----\\n\")\n", - "print(training_df.info())\n", - "\n", - "print()\n", - "print(\"----- Example features -----\\n\")\n", - "print(training_df.head())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GFiXVdhz04t0" - }, - "source": [ - "### Step 4b: Run offline inference (batch scoring)\n", - "To power a batch model, we primarily need to generate features with the `get_historical_features` call, but using the current timestamp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rGR_xgIs04t0", - "outputId": "3496e5a1-79ff-4f3c-e35d-22b594992708" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "----- Example features -----\n", - "\n", - " driver_id event_timestamp \\\n", - "0 1001 2022-08-08 18:22:06.555018+00:00 \n", - "1 1002 2022-08-08 18:22:06.555018+00:00 \n", - "2 1003 2022-08-08 18:22:06.555018+00:00 \n", - "\n", - " label_driver_reported_satisfaction val_to_add val_to_add_2 conv_rate \\\n", - "0 1 1 10 0.663844 \n", - "1 5 2 20 0.151189 \n", - "2 3 3 30 0.769165 \n", - "\n", - " acc_rate avg_daily_trips conv_rate_plus_val1 conv_rate_plus_val2 \n", - "0 0.864639 359 1.663844 10.663844 \n", - "1 0.695982 311 2.151189 20.151189 \n", - "2 0.949191 789 3.769165 
30.769165 \n" - ] - } - ], - "source": [ - "entity_df[\"event_timestamp\"] = pd.to_datetime(\"now\", utc=True)\n", - "training_df = store.get_historical_features(\n", - " entity_df=entity_df,\n", - " features=[\n", - " \"driver_hourly_stats:conv_rate\",\n", - " \"driver_hourly_stats:acc_rate\",\n", - " \"driver_hourly_stats:avg_daily_trips\",\n", - " \"transformed_conv_rate:conv_rate_plus_val1\",\n", - " \"transformed_conv_rate:conv_rate_plus_val2\",\n", - " ],\n", - ").to_df()\n", - "\n", - "print(\"\\n----- Example features -----\\n\")\n", - "print(training_df.head())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ngl7HCtmz3hG" - }, - "source": [ - "## Step 5: Load features into your online store" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "p5JTeKfCVBZf" + }, + "source": [ + "# Overview\n", + "\n", + "In this tutorial, we'll use Feast to generate training data and power online model inference for a \n", + "ride-sharing driver satisfaction prediction model. Feast solves several common issues in this flow:\n", + "\n", + "1. **Training-serving skew and complex data joins:** Feature values often exist across multiple tables. Joining \n", + " these datasets can be complicated, slow, and error-prone.\n", + " * Feast joins these tables with battle-tested logic that ensures _point-in-time_ correctness so future feature \n", + " values do not leak to models.\n", + "2. **Online feature availability:** At inference time, models often need access to features that aren't readily \n", + " available and need to be precomputed from other data sources.\n", + " * Feast manages deployment to a variety of online stores (e.g. DynamoDB, Redis, Google Cloud Datastore) and \n", + " ensures necessary features are consistently _available_ and _freshly computed_ at inference time.\n", + "3. 
**Feature and model versioning:** Different teams within an organization are often unable to reuse \n", + " features across projects, resulting in duplicate feature creation logic. Models have data dependencies that need \n", + " to be versioned, for example when running A/B tests on model versions.\n", + " * Feast enables discovery of and collaboration on previously used features and enables versioning of sets of \n", + " features (via _feature services_).\n", + " * _(Experimental)_ Feast enables light-weight feature transformations so users can re-use transformation logic \n", + " across online / offline use cases and across models.\n", + "\n", + "We will:\n", + "1. Deploy a local feature store with a **Parquet file offline store** and **Sqlite online store**.\n", + "2. Build a training dataset using our time series features from our **Parquet files**.\n", + "3. Materialize feature values from the offline store into the online store.\n", + "4. Read the latest features from the online store for inference." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9_Y997DzvOMI" + }, + "source": [ + "## Step 1: Install Feast\n", + "\n", + "Install Feast using pip:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rXNMAAJKQPG5" + }, + "outputs": [], + "source": [ + "%%sh\n", + "pip install feast -U -q\n", + "echo \"Please restart your runtime now (Runtime -> Restart runtime). This ensures that the correct dependencies are loaded.\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "id": "sOX_LwjaAhKz" + }, + "source": [ + "**Reminder**: Please restart your runtime after installing Feast (Runtime -> Restart runtime). 
This ensures that the correct dependencies are loaded.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OZetvs5xx4GP" + }, + "source": [ + "## Step 2: Create a feature repository\n", + "\n", + "A feature repository is a directory that contains the configuration of the feature store and individual features. This configuration is written as code (Python/YAML) and it's highly recommended that teams track it centrally using git. See [Feature Repository](https://docs.feast.dev/reference/feature-repository) for a detailed explanation of feature repositories.\n", + "\n", + "The easiest way to create a new feature repository to use the `feast init` command. This creates a scaffolding with initial demo data.\n", + "\n", + "### Demo data scenario \n", + "- We have surveyed some drivers for how satisfied they are with their experience in a ride-sharing app. \n", + "- We want to generate predictions for driver satisfaction for the rest of the users so we can reach out to potentially dissatisfied users." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "IhirSkgUvYau", + "outputId": "664367b9-6a2a-493d-fd78-6495fb459fa2" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "KCXUpiQ_pmDk" - }, - "source": [ - "### Step 5a: Using `materialize_incremental`\n", - "\n", - "We now serialize the latest values of features since the beginning of time to prepare for serving. Note, `materialize_incremental` serializes all new features since the last `materialize` call, or since the time provided minus the `ttl` timedelta. In this case, this will be `CURRENT_TIME - 1 day` (`ttl` was set on the `FeatureView` instances in [feature_repo/feature_repo/example_repo.py](feature_repo/feature_repo/example_repo.py)). 
\n", - "\n", - "```bash\n", - "CURRENT_TIME=$(date -u +\"%Y-%m-%dT%H:%M:%S\")\n", - "feast materialize-incremental $CURRENT_TIME\n", - "```\n", - "\n", - "An alternative to using the CLI command is to use Python:" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Creating a new Feast repository in \u001b[1m\u001b[32m/content/feature_repo\u001b[0m.\n", + "\n" + ] + } + ], + "source": [ + "!feast init feature_repo" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OdTASZPvyKCe" + }, + "source": [ + "### Step 2a: Inspecting the feature repository\n", + "\n", + "Let's take a look at the demo repo itself. It breaks down into\n", + "\n", + "\n", + "* `data/` contains raw demo parquet data\n", + "* `example_repo.py` contains demo feature definitions\n", + "* `feature_store.yaml` contains a demo setup configuring where data sources are\n", + "* `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features.\n", + " * You can run this with `python test_workflow.py`.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "9jXuzt4ovzA3", + "outputId": "9e326892-f0cc-4d86-d0b2-f33f822f83a9" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "7Z6QxIebAhK5", - "outputId": "9b54777d-2dd8-4ec3-b4e7-e3275800a980" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Materializing \u001b[1m\u001b[32m1\u001b[0m feature views to \u001b[1m\u001b[32m2022-08-08 14:19:04-04:00\u001b[0m into the \u001b[1m\u001b[32msqlite\u001b[0m online store.\n", - "\n", - "\u001b[1m\u001b[32mdriver_hourly_stats\u001b[0m from \u001b[1m\u001b[32m2022-08-07 18:19:04-04:00\u001b[0m to \u001b[1m\u001b[32m2022-08-08 14:19:04-04:00\u001b[0m:\n" - ] - }, - { - "name": "stderr", - 
"output_type": "stream", - "text": [ - "100%|████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 346.47it/s]\n" - ] - } - ], - "source": [ - "from datetime import datetime\n", - "store.materialize_incremental(datetime.now())" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "/content/feature_repo\n", + "README.md feature_store.yaml\n", + "__init__.py example_repo.py test_workflow.py\n", + "\n", + "./data:\n", + "driver_stats.parquet\n" + ] + } + ], + "source": [ + "%cd feature_repo/feature_repo\n", + "!ls -R" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MJk_WNsbeUP6" + }, + "source": [ + "### Step 2b: Inspecting the project configuration\n", + "Let's inspect the setup of the project in `feature_store.yaml`. \n", + "\n", + "The key line defining the overall architecture of the feature store is the **provider**. \n", + "\n", + "The provider value sets default offline and online stores. \n", + "* The offline store provides the compute layer to process historical data (for generating training data & feature \n", + " values for serving). \n", + "* The online store is a low latency store of the latest feature values (for powering real-time inference).\n", + "\n", + "Valid values for `provider` in `feature_store.yaml` are:\n", + "\n", + "* local: use file source with SQLite/Redis\n", + "* gcp: use BigQuery/Snowflake with Google Cloud Datastore/Redis\n", + "* aws: use Redshift/Snowflake with DynamoDB/Redis\n", + "\n", + "Note that there are many other offline / online stores Feast works with, including Azure, Hive, Trino, and PostgreSQL via community plugins. 
See https://docs.feast.dev/roadmap for all supported connectors.\n", + "\n", + "A custom setup can also be made by following [Customizing Feast](https://docs.feast.dev/v/master/how-to-guides/customizing-feast)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "9_YJ--uYdtcP", + "outputId": "af56a8da-9ca2-4dd9-f73c-a60dd3e1613a" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "l7t12bhH4i9H" - }, - "source": [ - "### Step 5b: Inspect materialized features\n", - "\n", - "Note that now there are `online_store.db` and `registry.db`, which store the materialized features and schema information, respectively." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[94mproject\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mfeature_repo\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m# By default, the registry is a file (but can be turned into a more scalable SQL-backed registry)\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mregistry\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/registry.db\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m# The provider primarily specifies default offline / online stores & storing the registry in a given cloud\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mprovider\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mlocal\u001b[37m\u001b[39;49;00m\n", + "\u001b[94monline_store\u001b[39;49;00m:\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mpath\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/online_store.db\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mentity_key_serialization_version\u001b[39;49;00m:\u001b[37m \u001b[39;49;00m2\u001b[37m\u001b[39;49;00m\n" + ] + } + ], + "source": [ + "!pygmentize feature_store.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FnMlk4zshywp" + }, + "source": [ + "### Inspecting the raw data\n", + "\n", + "The raw feature data we 
have in this demo is stored in a local parquet file. The dataset captures hourly stats of a driver in a ride-sharing app." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 423 }, + "id": "sIF2lO59dwzi", + "outputId": "8931930b-b32f-43e1-d45b-de230489c7b8" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "aVIgSYhI4cvR", - "outputId": "3c60f99c-2471-4343-83ed-cc60a6a9c3b2" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--- Data directory ---\n", - "driver_stats.parquet online_store.db registry.db\n", - "\n", - "--- Schema of online store ---\n", - "['entity_key', 'feature_name', 'value', 'event_ts', 'created_ts']\n" - ] - } + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
event_timestampdriver_idconv_rateacc_rateavg_daily_tripscreated
02022-07-24 14:00:00+00:0010050.4239130.0828312012022-08-08 14:14:11.200
12022-07-24 15:00:00+00:0010050.5071260.4274706902022-08-08 14:14:11.200
22022-07-24 16:00:00+00:0010050.1398100.1297438452022-08-08 14:14:11.200
32022-07-24 17:00:00+00:0010050.3835740.0717288392022-08-08 14:14:11.200
42022-07-24 18:00:00+00:0010050.9591310.44005122022-08-08 14:14:11.200
.....................
18022022-08-08 12:00:00+00:0010010.9948830.0201456502022-08-08 14:14:11.200
18032022-08-08 13:00:00+00:0010010.6638440.8646393592022-08-08 14:14:11.200
18042021-04-12 07:00:00+00:0010010.0686960.6249776242022-08-08 14:14:11.200
18052022-08-01 02:00:00+00:0010030.9808690.2444207902022-08-08 14:14:11.200
18062022-08-01 02:00:00+00:0010030.9808690.2444207902022-08-08 14:14:11.200
\n", + "

1807 rows × 6 columns

\n", + "
" ], - "source": [ - "print(\"--- Data directory ---\")\n", - "!ls data\n", - "\n", - "import sqlite3\n", - "import pandas as pd\n", - "con = sqlite3.connect(\"data/online_store.db\")\n", - "print(\"\\n--- Schema of online store ---\")\n", - "print(\n", - " pd.read_sql_query(\n", - " \"SELECT * FROM feature_repo_driver_hourly_stats\", con).columns.tolist())\n", - "con.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AWcttaGalzAm" - }, - "source": [ - "### Quick note on entity keys\n", - "Note from the above command that the online store indexes by `entity_key`. \n", - "\n", - "[Entity keys](https://docs.feast.dev/getting-started/concepts/entity#entity-key) include a list of all entities needed (e.g. all relevant primary keys) to generate the feature vector. In this case, this is a serialized version of the `driver_id`. We use this later to fetch all features for a given driver at inference time." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GNecKOaI0J2Z" - }, - "source": [ - "## Step 6: Fetching real-time feature vectors for online inference" + "text/plain": [ + " event_timestamp driver_id conv_rate acc_rate \\\n", + "0 2022-07-24 14:00:00+00:00 1005 0.423913 0.082831 \n", + "1 2022-07-24 15:00:00+00:00 1005 0.507126 0.427470 \n", + "2 2022-07-24 16:00:00+00:00 1005 0.139810 0.129743 \n", + "3 2022-07-24 17:00:00+00:00 1005 0.383574 0.071728 \n", + "4 2022-07-24 18:00:00+00:00 1005 0.959131 0.440051 \n", + "... ... ... ... ... 
\n", + "1802 2022-08-08 12:00:00+00:00 1001 0.994883 0.020145 \n", + "1803 2022-08-08 13:00:00+00:00 1001 0.663844 0.864639 \n", + "1804 2021-04-12 07:00:00+00:00 1001 0.068696 0.624977 \n", + "1805 2022-08-01 02:00:00+00:00 1003 0.980869 0.244420 \n", + "1806 2022-08-01 02:00:00+00:00 1003 0.980869 0.244420 \n", + "\n", + " avg_daily_trips created \n", + "0 201 2022-08-08 14:14:11.200 \n", + "1 690 2022-08-08 14:14:11.200 \n", + "2 845 2022-08-08 14:14:11.200 \n", + "3 839 2022-08-08 14:14:11.200 \n", + "4 2 2022-08-08 14:14:11.200 \n", + "... ... ... \n", + "1802 650 2022-08-08 14:14:11.200 \n", + "1803 359 2022-08-08 14:14:11.200 \n", + "1804 624 2022-08-08 14:14:11.200 \n", + "1805 790 2022-08-08 14:14:11.200 \n", + "1806 790 2022-08-08 14:14:11.200 \n", + "\n", + "[1807 rows x 6 columns]" ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "pd.read_parquet(\"data/driver_stats.parquet\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rRL8-ubWzUFy" + }, + "source": [ + "## Step 3: Register feature definitions and deploy your feature store\n", + "\n", + "`feast apply` scans python files in the current directory for feature/entity definitions and deploys infrastructure according to `feature_store.yaml`.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5NS4INL5n7ze" + }, + "source": [ + "### Step 3a: Inspecting feature definitions\n", + "Let's inspect what `example_repo.py` looks like:\n", + "\n", + "```python\n", + "# This is an example feature definition file\n", + "\n", + "from datetime import timedelta\n", + "\n", + "import pandas as pd\n", + "\n", + "from feast import Entity, FeatureService, FeatureView, Field, FileSource, RequestSource, PushSource\n", + "from feast.on_demand_feature_view import on_demand_feature_view\n", + "from feast.types import Float32, Int64, Float64\n", + "\n", + "# Read data from parquet files. 
Parquet is convenient for local development mode. For\n", + "# production, you can use your favorite DWH, such as BigQuery. See Feast documentation\n", + "# for more info.\n", + "driver_hourly_stats = FileSource(\n", + " name=\"driver_hourly_stats_source\",\n", + " path=\"/content/feature_repo/data/driver_stats.parquet\",\n", + " timestamp_field=\"event_timestamp\",\n", + " created_timestamp_column=\"created\",\n", + ")\n", + "\n", + "# Define an entity for the driver. You can think of entity as a primary key used to\n", + "# fetch features.\n", + "driver = Entity(name=\"driver\", join_keys=[\"driver_id\"])\n", + "\n", + "# Our parquet files contain sample data that includes a driver_id column, timestamps and\n", + "# three feature column. Here we define a Feature View that will allow us to serve this\n", + "# data to our model online.\n", + "driver_hourly_stats_view = FeatureView(\n", + " name=\"driver_hourly_stats\",\n", + " entities=[driver],\n", + " ttl=timedelta(days=1),\n", + " schema=[\n", + " Field(name=\"conv_rate\", dtype=Float32),\n", + " Field(name=\"acc_rate\", dtype=Float32),\n", + " Field(name=\"avg_daily_trips\", dtype=Int64),\n", + " ],\n", + " online=True,\n", + " source=driver_hourly_stats,\n", + " tags={},\n", + ")\n", + "\n", + "# Defines a way to push data (to be available offline, online or both) into Feast.\n", + "driver_stats_push_source = PushSource(\n", + " name=\"driver_stats_push_source\",\n", + " batch_source=driver_hourly_stats,\n", + ")\n", + "\n", + "# Define a request data source which encodes features / information only\n", + "# available at request time (e.g. 
part of the user initiated HTTP request)\n", + "input_request = RequestSource(\n", + " name=\"vals_to_add\",\n", + " schema=[\n", + " Field(name=\"val_to_add\", dtype=Int64),\n", + " Field(name=\"val_to_add_2\", dtype=Int64),\n", + " ],\n", + ")\n", + "\n", + "\n", + "# Define an on demand feature view which can generate new features based on\n", + "# existing feature views and RequestSource features\n", + "@on_demand_feature_view(\n", + " sources=[driver_hourly_stats_view, input_request],\n", + " schema=[\n", + " Field(name=\"conv_rate_plus_val1\", dtype=Float64),\n", + " Field(name=\"conv_rate_plus_val2\", dtype=Float64),\n", + " ],\n", + ")\n", + "def transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame:\n", + " df = pd.DataFrame()\n", + " df[\"conv_rate_plus_val1\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add\"]\n", + " df[\"conv_rate_plus_val2\"] = inputs[\"conv_rate\"] + inputs[\"val_to_add_2\"]\n", + " return df\n", + "\n", + "\n", + "# This groups features into a model version\n", + "driver_stats_fs = FeatureService(\n", + " name=\"driver_activity_v1\", features=[driver_hourly_stats_view, transformed_conv_rate]\n", + ")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "im_cc5HdoDno" + }, + "source": [ + "### Step 3b: Applying feature definitions\n", + "Now we run `feast apply` to register the feature views and entities defined in `example_repo.py`, and sets up SQLite online store tables. Note that we had previously specified SQLite as the online store in `feature_store.yaml` by specifying a `local` provider." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "RYKCKKrcxYZG", + "outputId": "f34aa509-1dc6-4e50-e8ee-12897138f3b9" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "TBFlKRsOAhK8" - }, - "source": [ - "At inference time, we need to quickly read the latest feature values for different drivers (which otherwise might have existed only in batch sources) from the online feature store using `get_online_features()`. These feature vectors can then be fed to the model." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "RuntimeWarning: On demand feature view is an experimental feature. This API is stable, but the functionality does not scale well for offline retrieval\n", + " warnings.warn(\n", + "Created entity \u001b[1m\u001b[32mdriver\u001b[0m\n", + "Created feature view \u001b[1m\u001b[32mdriver_hourly_stats\u001b[0m\n", + "Created on demand feature view \u001b[1m\u001b[32mtransformed_conv_rate\u001b[0m\n", + "Created feature service \u001b[1m\u001b[32mdriver_activity_v1\u001b[0m\n", + "\n", + "Created sqlite table \u001b[1m\u001b[32mfeature_repo_driver_hourly_stats\u001b[0m\n", + "\n" + ] + } + ], + "source": [ + "!feast apply" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uV7rtRQgzyf0" + }, + "source": [ + "## Step 4: Generating training data or powering batch scoring models\n", + "\n", + "To train a model, we need features and labels. Often, this label data is stored separately (e.g. you have one table storing user survey results and another set of tables with feature values). Feast can help generate the features that map to these labels.\n", + "\n", + "Feast needs a list of **entities** (e.g. driver ids) and **timestamps**. Feast will intelligently join relevant \n", + "tables to create the relevant feature vectors. There are two ways to generate this list:\n", + "1. 
The user can query that table of labels with timestamps and pass that into Feast as an _entity dataframe_ for \n", + "training data generation. \n", + "2. The user can also query that table with a *SQL query* which pulls entities. See the documentation on [feature retrieval](https://docs.feast.dev/getting-started/concepts/feature-retrieval) for details \n", + "\n", + "* Note that we include timestamps because we want the features for the same driver at various timestamps to be used in a model.\n", + "\n", + "### Step 4a: Generating training data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "C6Fzia7YwBzz", + "outputId": "58c4c3dd-7a10-4f56-901d-1bb879ebbcb8" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "a-PUsUWUxoH9", - "outputId": "fc52dc04-db87-4f48-df36-d3941d485600" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'acc_rate': [0.86463862657547, 0.6959823369979858],\n", - " 'avg_daily_trips': [359, 311],\n", - " 'conv_rate_plus_val1': [1000.6638441681862, 1001.1511893719435],\n", - " 'conv_rate_plus_val2': [2000.6638441681862, 2002.1511893719435],\n", - " 'driver_id': [1001, 1002]}\n" - ] - } - ], - "source": [ - "from pprint import pprint\n", - "from feast import FeatureStore\n", - "\n", - "store = FeatureStore(repo_path=\".\")\n", - "\n", - "feature_vector = store.get_online_features(\n", - " features=[\n", - " \"driver_hourly_stats:acc_rate\",\n", - " \"driver_hourly_stats:avg_daily_trips\",\n", - " \"transformed_conv_rate:conv_rate_plus_val1\",\n", - " \"transformed_conv_rate:conv_rate_plus_val2\",\n", - " ],\n", - " entity_rows=[\n", - " # {join_key: entity_value}\n", - " {\n", - " \"driver_id\": 1001,\n", - " \"val_to_add\": 1000,\n", - " \"val_to_add_2\": 2000,\n", - " },\n", - " {\n", - " \"driver_id\": 1002,\n", 
- " \"val_to_add\": 1001,\n", - " \"val_to_add_2\": 2002,\n", - " },\n", - " ],\n", - ").to_dict()\n", - "\n", - "pprint(feature_vector)" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "----- Feature schema -----\n", + "\n", + "\n", + "RangeIndex: 3 entries, 0 to 2\n", + "Data columns (total 10 columns):\n", + " # Column Non-Null Count Dtype \n", + "--- ------ -------------- ----- \n", + " 0 driver_id 3 non-null int64 \n", + " 1 event_timestamp 3 non-null datetime64[ns, UTC]\n", + " 2 label_driver_reported_satisfaction 3 non-null int64 \n", + " 3 val_to_add 3 non-null int64 \n", + " 4 val_to_add_2 3 non-null int64 \n", + " 5 conv_rate 3 non-null float32 \n", + " 6 acc_rate 3 non-null float32 \n", + " 7 avg_daily_trips 3 non-null int32 \n", + " 8 conv_rate_plus_val1 3 non-null float64 \n", + " 9 conv_rate_plus_val2 3 non-null float64 \n", + "dtypes: datetime64[ns, UTC](1), float32(2), float64(2), int32(1), int64(4)\n", + "memory usage: 332.0 bytes\n", + "None\n", + "\n", + "----- Example features -----\n", + "\n", + " driver_id event_timestamp label_driver_reported_satisfaction \\\n", + "0 1001 2021-04-12 10:59:42+00:00 1 \n", + "1 1002 2021-04-12 08:12:10+00:00 5 \n", + "2 1003 2021-04-12 16:40:26+00:00 3 \n", + "\n", + " val_to_add val_to_add_2 conv_rate acc_rate avg_daily_trips \\\n", + "0 1 10 0.356766 0.051319 93 \n", + "1 2 20 0.130452 0.359439 522 \n", + "2 3 30 0.666570 0.343380 266 \n", + "\n", + " conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 1.356766 10.356766 \n", + "1 2.130452 20.130452 \n", + "2 3.666570 30.666570 \n" + ] + } + ], + "source": [ + "from datetime import datetime\n", + "import pandas as pd\n", + "\n", + "from feast import FeatureStore\n", + "\n", + "# The entity dataframe is the dataframe we want to enrich with feature values\n", + "# Note: see https://docs.feast.dev/getting-started/concepts/feature-retrieval for more details on how to retrieve\n", + "# for all entities in the offline store instead\n", + 
"entity_df = pd.DataFrame.from_dict(\n", + " {\n", + " # entity's join key -> entity values\n", + " \"driver_id\": [1001, 1002, 1003],\n", + " # \"event_timestamp\" (reserved key) -> timestamps\n", + " \"event_timestamp\": [\n", + " datetime(2021, 4, 12, 10, 59, 42),\n", + " datetime(2021, 4, 12, 8, 12, 10),\n", + " datetime(2021, 4, 12, 16, 40, 26),\n", + " ],\n", + " # (optional) label name -> label values. Feast does not process these\n", + " \"label_driver_reported_satisfaction\": [1, 5, 3],\n", + " # values we're using for an on-demand transformation\n", + " \"val_to_add\": [1, 2, 3],\n", + " \"val_to_add_2\": [10, 20, 30],\n", + " }\n", + ")\n", + "\n", + "store = FeatureStore(repo_path=\".\")\n", + "\n", + "training_df = store.get_historical_features(\n", + " entity_df=entity_df,\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"transformed_conv_rate:conv_rate_plus_val1\",\n", + " \"transformed_conv_rate:conv_rate_plus_val2\",\n", + " ],\n", + ").to_df()\n", + "\n", + "print(\"----- Feature schema -----\\n\")\n", + "print(training_df.info())\n", + "\n", + "print()\n", + "print(\"----- Example features -----\\n\")\n", + "print(training_df.head())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GFiXVdhz04t0" + }, + "source": [ + "### Step 4b: Run offline inference (batch scoring)\n", + "To power a batch model, we primarily need to generate features with the `get_historical_features` call, but using the current timestamp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rGR_xgIs04t0", + "outputId": "3496e5a1-79ff-4f3c-e35d-22b594992708" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "SRY87OMBoK_Z" - }, - "source": [ - "### Fetching features using feature services\n", - "You can also use feature services to manage multiple features, and decouple feature view definitions 
and the features needed by end applications. The feature store can also be used to fetch either online or historical features using the same api below. More information can be found [here](https://docs.feast.dev/getting-started/concepts/feature-retrieval).\n", - "\n", - " The `driver_activity_v1` feature service pulls all features from the `driver_hourly_stats` feature view:\n", - "\n", - "```python\n", - "driver_stats_fs = FeatureService(\n", - " name=\"driver_activity_v1\", features=[driver_hourly_stats_view]\n", - ")\n", - "```" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "----- Example features -----\n", + "\n", + " driver_id event_timestamp \\\n", + "0 1001 2022-08-08 18:22:06.555018+00:00 \n", + "1 1002 2022-08-08 18:22:06.555018+00:00 \n", + "2 1003 2022-08-08 18:22:06.555018+00:00 \n", + "\n", + " label_driver_reported_satisfaction val_to_add val_to_add_2 conv_rate \\\n", + "0 1 1 10 0.663844 \n", + "1 5 2 20 0.151189 \n", + "2 3 3 30 0.769165 \n", + "\n", + " acc_rate avg_daily_trips conv_rate_plus_val1 conv_rate_plus_val2 \n", + "0 0.864639 359 1.663844 10.663844 \n", + "1 0.695982 311 2.151189 20.151189 \n", + "2 0.949191 789 3.769165 30.769165 \n" + ] + } + ], + "source": [ + "entity_df[\"event_timestamp\"] = pd.to_datetime(\"now\", utc=True)\n", + "training_df = store.get_historical_features(\n", + " entity_df=entity_df,\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"transformed_conv_rate:conv_rate_plus_val1\",\n", + " \"transformed_conv_rate:conv_rate_plus_val2\",\n", + " ],\n", + ").to_df()\n", + "\n", + "print(\"\\n----- Example features -----\\n\")\n", + "print(training_df.head())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ngl7HCtmz3hG" + }, + "source": [ + "## Step 5: Load features into your online store" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KCXUpiQ_pmDk" 
+ }, + "source": [ + "### Step 5a: Using `materialize_incremental`\n", + "\n", + "We now serialize the latest values of features since the beginning of time to prepare for serving. Note, `materialize_incremental` serializes all new features since the last `materialize` call, or since the time provided minus the `ttl` timedelta. In this case, this will be `CURRENT_TIME - 1 day` (`ttl` was set on the `FeatureView` instances in [feature_repo/feature_repo/example_repo.py](feature_repo/feature_repo/example_repo.py)). \n", + "\n", + "```bash\n", + "CURRENT_TIME=$(date -u +\"%Y-%m-%dT%H:%M:%S\")\n", + "feast materialize-incremental $CURRENT_TIME\n", + "```\n", + "\n", + "An alternative to using the CLI command is to use Python:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "7Z6QxIebAhK5", + "outputId": "9b54777d-2dd8-4ec3-b4e7-e3275800a980" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "BrnAEKlPn9s8", - "outputId": "45f7f075-5243-4fa7-dbd4-63c0c22a68cd" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'acc_rate': [0.86463862657547, 0.6959823369979858],\n", - " 'avg_daily_trips': [359, 311],\n", - " 'conv_rate': [0.6638441681861877, 0.15118937194347382],\n", - " 'conv_rate_plus_val1': [1000.6638441681862, 1001.1511893719435],\n", - " 'conv_rate_plus_val2': [2000.6638441681862, 2002.1511893719435],\n", - " 'driver_id': [1001, 1002]}\n" - ] - } - ], - "source": [ - "from feast import FeatureStore\n", - "feature_store = FeatureStore('.') # Initialize the feature store\n", - "\n", - "feature_service = feature_store.get_feature_service(\"driver_activity_v1\")\n", - "feature_vector = feature_store.get_online_features(\n", - " features=feature_service,\n", - " entity_rows=[\n", - " # {join_key: entity_value}\n", - " {\n", - " \"driver_id\": 
1001,\n", - " \"val_to_add\": 1000,\n", - " \"val_to_add_2\": 2000,\n", - " },\n", - " {\n", - " \"driver_id\": 1002,\n", - " \"val_to_add\": 1001,\n", - " \"val_to_add_2\": 2002,\n", - " },\n", - " ],\n", - ").to_dict()\n", - "pprint(feature_vector)" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Materializing \u001b[1m\u001b[32m1\u001b[0m feature views to \u001b[1m\u001b[32m2022-08-08 14:19:04-04:00\u001b[0m into the \u001b[1m\u001b[32msqlite\u001b[0m online store.\n", + "\n", + "\u001b[1m\u001b[32mdriver_hourly_stats\u001b[0m from \u001b[1m\u001b[32m2022-08-07 18:19:04-04:00\u001b[0m to \u001b[1m\u001b[32m2022-08-08 14:19:04-04:00\u001b[0m:\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "PvPOSPV904t7" - }, - "source": [ - "## Step 7: Making streaming features available in Feast\n", - "Feast does not directly ingest from streaming sources. Instead, Feast relies on a push-based model to push features into Feast. You can write a streaming pipeline that generates features, which can then be pushed to the offline store, the online store, or both (depending on your needs).\n", - "\n", - "This relies on the `PushSource` defined above. Pushing to this source will populate all dependent feature views with the pushed feature values." - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 346.47it/s]\n" + ] + } + ], + "source": [ + "from datetime import datetime\n", + "store.materialize_incremental(datetime.now())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l7t12bhH4i9H" + }, + "source": [ + "### Step 5b: Inspect materialized features\n", + "\n", + "Note that now there are `online_store.db` and `registry.db`, which store the materialized features and schema information, respectively." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "aVIgSYhI4cvR", + "outputId": "3c60f99c-2471-4343-83ed-cc60a6a9c3b2" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uAg5xKDF04t7", - "outputId": "8288b911-125f-4141-b286-f6f84bcb24ea" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "--- Simulate a stream event ingestion of the hourly stats df ---\n", - " driver_id event_timestamp created conv_rate acc_rate \\\n", - "0 1001 2021-05-13 10:59:42 2021-05-13 10:59:42 1.0 1.0 \n", - "\n", - " avg_daily_trips \n", - "0 1000 \n" - ] - } - ], - "source": [ - "from feast.data_source import PushMode\n", - "\n", - "print(\"\\n--- Simulate a stream event ingestion of the hourly stats df ---\")\n", - "event_df = pd.DataFrame.from_dict(\n", - " {\n", - " \"driver_id\": [1001],\n", - " \"event_timestamp\": [\n", - " datetime(2021, 5, 13, 10, 59, 42),\n", - " ],\n", - " \"created\": [\n", - " datetime(2021, 5, 13, 10, 59, 42),\n", - " ],\n", - " \"conv_rate\": [1.0],\n", - " \"acc_rate\": [1.0],\n", - " \"avg_daily_trips\": [1000],\n", - " }\n", - ")\n", - "print(event_df)\n", - "store.push(\"driver_stats_push_source\", event_df, to=PushMode.ONLINE_AND_OFFLINE)" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Data directory ---\n", + "driver_stats.parquet online_store.db registry.db\n", + "\n", + "--- Schema of online store ---\n", + "['entity_key', 'feature_name', 'value', 'event_ts', 'created_ts']\n" + ] + } + ], + "source": [ + "print(\"--- Data directory ---\")\n", + "!ls data\n", + "\n", + "import sqlite3\n", + "import pandas as pd\n", + "con = sqlite3.connect(\"data/online_store.db\")\n", + "print(\"\\n--- Schema of online store ---\")\n", + "print(\n", + " pd.read_sql_query(\n", + " \"SELECT * FROM feature_repo_driver_hourly_stats\", con).columns.tolist())\n", + 
"con.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AWcttaGalzAm" + }, + "source": [ + "### Quick note on entity keys\n", + "Note from the above command that the online store indexes by `entity_key`. \n", + "\n", + "[Entity keys](https://docs.feast.dev/getting-started/concepts/entity#entity-key) include a list of all entities needed (e.g. all relevant primary keys) to generate the feature vector. In this case, this is a serialized version of the `driver_id`. We use this later to fetch all features for a given driver at inference time." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GNecKOaI0J2Z" + }, + "source": [ + "## Step 6: Fetching real-time feature vectors for online inference" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TBFlKRsOAhK8" + }, + "source": [ + "At inference time, we need to quickly read the latest feature values for different drivers (which otherwise might have existed only in batch sources) from the online feature store using `get_online_features()`. These feature vectors can then be fed to the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "a-PUsUWUxoH9", + "outputId": "fc52dc04-db87-4f48-df36-d3941d485600" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "lg68gH2sy6H1" - }, - "source": [ - "# Next steps\n", - "\n", - "- Read the [Concepts](https://docs.feast.dev/getting-started/concepts/) page to understand the Feast data model and architecture.\n", - "- Check out our [Tutorials](https://docs.feast.dev/tutorials/tutorials-overview) section for more examples on how to use Feast.\n", - "- Follow our [Running Feast with Snowflake/GCP/AWS](https://docs.feast.dev/how-to-guides/feast-snowflake-gcp-aws) guide for a more in-depth tutorial on using Feast.\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "{'acc_rate': [0.86463862657547, 0.6959823369979858],\n", + " 'avg_daily_trips': [359, 311],\n", + " 'conv_rate_plus_val1': [1000.6638441681862, 1001.1511893719435],\n", + " 'conv_rate_plus_val2': [2000.6638441681862, 2002.1511893719435],\n", + " 'driver_id': [1001, 1002]}\n" + ] } - ], - "metadata": { + ], + "source": [ + "from pprint import pprint\n", + "from feast import FeatureStore\n", + "\n", + "store = FeatureStore(repo_path=\".\")\n", + "\n", + "feature_vector = store.get_online_features(\n", + " features=[\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"transformed_conv_rate:conv_rate_plus_val1\",\n", + " \"transformed_conv_rate:conv_rate_plus_val2\",\n", + " ],\n", + " entity_rows=[\n", + " # {join_key: entity_value}\n", + " {\n", + " \"driver_id\": 1001,\n", + " \"val_to_add\": 1000,\n", + " \"val_to_add_2\": 2000,\n", + " },\n", + " {\n", + " \"driver_id\": 1002,\n", + " \"val_to_add\": 1001,\n", + " \"val_to_add_2\": 2002,\n", + " },\n", + " ],\n", + ").to_dict()\n", + "\n", + "pprint(feature_vector)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"id": "SRY87OMBoK_Z" + }, + "source": [ + "### Fetching features using feature services\n", + "You can also use feature services to manage multiple features, and decouple feature view definitions and the features needed by end applications. The feature store can also be used to fetch either online or historical features using the same api below. More information can be found [here](https://docs.feast.dev/getting-started/concepts/feature-retrieval).\n", + "\n", + " The `driver_activity_v1` feature service pulls all features from the `driver_hourly_stats` feature view:\n", + "\n", + "```python\n", + "driver_stats_fs = FeatureService(\n", + " name=\"driver_activity_v1\", features=[driver_hourly_stats_view]\n", + ")\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { "colab": { - "collapsed_sections": [], - "name": "quickstart.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3.8.10 64-bit ('python-3.8')", - "language": "python", - "name": "python3" + "base_uri": "https://localhost:8080/" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - }, - "vscode": { - "interpreter": { - "hash": "7d634b9af180bcb32a446a43848522733ff8f5bbf0cc46dba1a83bede04bf237" - } + "id": "BrnAEKlPn9s8", + "outputId": "45f7f075-5243-4fa7-dbd4-63c0c22a68cd" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'acc_rate': [0.86463862657547, 0.6959823369979858],\n", + " 'avg_daily_trips': [359, 311],\n", + " 'conv_rate': [0.6638441681861877, 0.15118937194347382],\n", + " 'conv_rate_plus_val1': [1000.6638441681862, 1001.1511893719435],\n", + " 'conv_rate_plus_val2': [2000.6638441681862, 2002.1511893719435],\n", + " 'driver_id': [1001, 1002]}\n" + ] } + ], + "source": [ + "from feast import 
FeatureStore\n", + "feature_store = FeatureStore('.') # Initialize the feature store\n", + "\n", + "feature_service = feature_store.get_feature_service(\"driver_activity_v1\")\n", + "feature_vector = feature_store.get_online_features(\n", + " features=feature_service,\n", + " entity_rows=[\n", + " # {join_key: entity_value}\n", + " {\n", + " \"driver_id\": 1001,\n", + " \"val_to_add\": 1000,\n", + " \"val_to_add_2\": 2000,\n", + " },\n", + " {\n", + " \"driver_id\": 1002,\n", + " \"val_to_add\": 1001,\n", + " \"val_to_add_2\": 2002,\n", + " },\n", + " ],\n", + ").to_dict()\n", + "pprint(feature_vector)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PvPOSPV904t7" + }, + "source": [ + "## Step 7: Making streaming features available in Feast\n", + "Feast does not directly ingest from streaming sources. Instead, Feast relies on a push-based model to push features into Feast. You can write a streaming pipeline that generates features, which can then be pushed to the offline store, the online store, or both (depending on your needs).\n", + "\n", + "This relies on the `PushSource` defined above. Pushing to this source will populate all dependent feature views with the pushed feature values." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uAg5xKDF04t7", + "outputId": "8288b911-125f-4141-b286-f6f84bcb24ea" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Simulate a stream event ingestion of the hourly stats df ---\n", + " driver_id event_timestamp created conv_rate acc_rate \\\n", + "0 1001 2021-05-13 10:59:42 2021-05-13 10:59:42 1.0 1.0 \n", + "\n", + " avg_daily_trips \n", + "0 1000 \n" + ] + } + ], + "source": [ + "from feast.data_source import PushMode\n", + "\n", + "print(\"\\n--- Simulate a stream event ingestion of the hourly stats df ---\")\n", + "event_df = pd.DataFrame.from_dict(\n", + " {\n", + " \"driver_id\": [1001],\n", + " \"event_timestamp\": [\n", + " datetime(2021, 5, 13, 10, 59, 42),\n", + " ],\n", + " \"created\": [\n", + " datetime(2021, 5, 13, 10, 59, 42),\n", + " ],\n", + " \"conv_rate\": [1.0],\n", + " \"acc_rate\": [1.0],\n", + " \"avg_daily_trips\": [1000],\n", + " }\n", + ")\n", + "print(event_df)\n", + "store.push(\"driver_stats_push_source\", event_df, to=PushMode.ONLINE_AND_OFFLINE)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lg68gH2sy6H1" + }, + "source": [ + "# Next steps\n", + "\n", + "- Read the [Concepts](https://docs.feast.dev/getting-started/concepts/) page to understand the Feast data model and architecture.\n", + "- Check out our [Tutorials](https://docs.feast.dev/tutorials/tutorials-overview) section for more examples on how to use Feast.\n", + "- Follow our [Running Feast with Snowflake/GCP/AWS](https://docs.feast.dev/how-to-guides/feast-snowflake-gcp-aws) guide for a more in-depth tutorial on using Feast.\n" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "quickstart.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('python-3.8')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + 
"name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" }, - "nbformat": 4, - "nbformat_minor": 0 + "vscode": { + "interpreter": { + "hash": "7d634b9af180bcb32a446a43848522733ff8f5bbf0cc46dba1a83bede04bf237" + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/examples/rag/README.md b/examples/rag/README.md new file mode 100644 index 00000000000..88775fae0ed --- /dev/null +++ b/examples/rag/README.md @@ -0,0 +1,87 @@ +# 🚀 Quickstart: Retrieval-Augmented Generation (RAG) using Feast and Large Language Models (LLMs) + +This project demonstrates how to use **Feast** to power a **Retrieval-Augmented Generation (RAG)** application. +The RAG architecture combines retrieval of documents (using vector search) with In-Context-Learning (ICL) through a +**Large Language Model (LLM)** to answer user questions accurately using structured and unstructured data. + +## 💡 Why Use Feast for RAG? + +- **Online retrieval of features:** Ensure real-time access to precomputed document embeddings and other structured data. +- **Declarative feature definitions:** Define feature views and entities in a Python file and empower Data Scientists to easily ship scalabe RAG applications with all of the existing benefits of Feast. +- **Vector search:** Leverage Feast’s integration with vector databases like **Milvus** to find relevant documents based on a similarity metric (e.g., cosine). +- **Structured and unstructured context:** Retrieve both embeddings and traditional features, injecting richer context into LLM prompts. +- **Versioning and reusability:** Collaborate across teams with discoverable, versioned data pipelines. + +--- + +## 📂 Project Structure + +- **`data/`**: Contains the demo data, including Wikipedia summaries of cities with sentence embeddings stored in a Parquet file. 
+- **`example_repo.py`**: Defines the feature views and entity configurations for Feast. +- **`feature_store.yaml`**: Configures the offline and online stores (using local files and Milvus Lite in this demo). +- **`test_workflow.py`**: Demonstrates key Feast commands to define, retrieve, and push features. + +--- + +## 🛠️ Setup + +1. **Install the necessary packages**: + ```bash + pip install feast torch transformers openai + ``` +2. Initialize and inspect the feature store: + + ```bash + feast apply + ``` + +3. Materialize features into the online store: + + ```python + store.write_to_online_store(feature_view_name='city_embeddings', df=df) + ``` +4. Run a query: + +- Prepare your question: +`question = "Which city has the largest population in New York?"` +- Embed the question using sentence-transformers/all-MiniLM-L6-v2. +- Retrieve the top K most relevant documents using Milvus vector search. +- Pass the retrieved context to the OpenAI model for conversational output. + +## 🛠️ Key Commands for Data Scientists +- Apply feature definitions: + +```bash +feast apply +``` + +- Materialize features to the online store: +```python +store.write_to_online_store(feature_view_name='city_embeddings', df=df) +``` + +- Inspect retrieved features using Python: +```python +context_data = store.retrieve_online_documents_v2( + features=[ + "city_embeddings:vector", + "city_embeddings:item_id", + "city_embeddings:state", + "city_embeddings:sentence_chunks", + "city_embeddings:wiki_summary", + ], + query=query, + top_k=3, + distance_metric='COSINE', +).to_df() +display(context_data) +``` + +📊 Example Output +When querying: Which city has the largest population in New York? + +The model provides: + +``` +The largest city in New York is New York City, often referred to as NYC. It is the most populous city in the United States, with an estimated population of 8,335,897 in 2022. 
+``` \ No newline at end of file diff --git a/examples/rag/__init__.py b/examples/rag/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/rag/feature_repo/__init__.py b/examples/rag/feature_repo/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/rag/feature_repo/data/city_wikipedia_summaries_with_embeddings.parquet b/examples/rag/feature_repo/data/city_wikipedia_summaries_with_embeddings.parquet new file mode 100644 index 00000000000..63270802fdf Binary files /dev/null and b/examples/rag/feature_repo/data/city_wikipedia_summaries_with_embeddings.parquet differ diff --git a/examples/rag/feature_repo/example_repo.py b/examples/rag/feature_repo/example_repo.py new file mode 100644 index 00000000000..e0a9be21452 --- /dev/null +++ b/examples/rag/feature_repo/example_repo.py @@ -0,0 +1,42 @@ +from datetime import timedelta + +from feast import ( + FeatureView, + Field, + FileSource, +) +from feast.data_format import ParquetFormat +from feast.types import Float32, Array, String, ValueType +from feast import Entity + +item = Entity( + name="item_id", + description="Item ID", + value_type=ValueType.INT64, +) + +parquet_file_path = "./data/city_wikipedia_summaries_with_embeddings.parquet" + +source = FileSource( + file_format=ParquetFormat(), + path=parquet_file_path, + timestamp_field="event_timestamp", +) + +city_embeddings_feature_view = FeatureView( + name="city_embeddings", + entities=[item], + schema=[ + Field( + name="vector", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="COSINE", + ), + Field(name="state", dtype=String), + Field(name="sentence_chunks", dtype=String), + Field(name="wiki_summary", dtype=String), + ], + source=source, + ttl=timedelta(hours=2), +) \ No newline at end of file diff --git a/examples/rag/feature_repo/feature_store.yaml b/examples/rag/feature_repo/feature_store.yaml new file mode 100644 index 00000000000..223be052093 --- /dev/null +++ 
b/examples/rag/feature_repo/feature_store.yaml @@ -0,0 +1,17 @@ +project: rag +provider: local +registry: data/registry.db +online_store: + type: milvus + path: data/online_store.db + vector_enabled: true + embedding_dim: 384 + index_type: "IVF_FLAT" + + +offline_store: + type: file +entity_key_serialization_version: 3 +# By default, no_auth for authentication and authorization, other possible values kubernetes and oidc. Refer the documentation for more details. +auth: + type: no_auth diff --git a/examples/rag/feature_repo/test_workflow.py b/examples/rag/feature_repo/test_workflow.py new file mode 100644 index 00000000000..05cd554d823 --- /dev/null +++ b/examples/rag/feature_repo/test_workflow.py @@ -0,0 +1,74 @@ +import pandas as pd +import torch +import torch.nn.functional as F +from feast import FeatureStore +from transformers import AutoTokenizer, AutoModel +from example_repo import city_embeddings_feature_view, item + +TOKENIZER = "sentence-transformers/all-MiniLM-L6-v2" +MODEL = "sentence-transformers/all-MiniLM-L6-v2" + + +def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[ + 0 + ] # First element of model_output contains all token embeddings + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( + input_mask_expanded.sum(1), min=1e-9 + ) + + +def run_model(sentences, tokenizer, model): + encoded_input = tokenizer( + sentences, padding=True, truncation=True, return_tensors="pt" + ) + # Compute token embeddings + with torch.no_grad(): + model_output = model(**encoded_input) + + sentence_embeddings = mean_pooling(model_output, encoded_input["attention_mask"]) + sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1) + return sentence_embeddings + +def run_demo(): + store = FeatureStore(repo_path=".") + df = pd.read_parquet("./data/city_wikipedia_summaries_with_embeddings.parquet") + 
embedding_length = len(df['vector'][0]) + print(f'embedding length = {embedding_length}') + + store.apply([city_embeddings_feature_view, item]) + fields = [ + f.name for f in city_embeddings_feature_view.features + ] + city_embeddings_feature_view.entities + [city_embeddings_feature_view.batch_source.timestamp_field] + print('\ndata=') + print(df[fields].head().T) + store.write_to_online_store("city_embeddings", df[fields][0:3]) + + + question = "the most populous city in the state of New York is New York" + tokenizer = AutoTokenizer.from_pretrained(TOKENIZER) + model = AutoModel.from_pretrained(MODEL) + query_embedding = run_model(question, tokenizer, model) + query = query_embedding.detach().cpu().numpy().tolist()[0] + + # Retrieve top k documents + features = store.retrieve_online_documents_v2( + features=[ + "city_embeddings:vector", + "city_embeddings:item_id", + "city_embeddings:state", + "city_embeddings:sentence_chunks", + "city_embeddings:wiki_summary", + ], + query=query, + top_k=3, + ) + print("features =") + print(features.to_df()) + store.teardown() + +if __name__ == "__main__": + run_demo() diff --git a/examples/rag/milvus-quickstart.ipynb b/examples/rag/milvus-quickstart.ipynb new file mode 100644 index 00000000000..2999d3ba43f --- /dev/null +++ b/examples/rag/milvus-quickstart.ipynb @@ -0,0 +1,1023 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "f33a2f4a-48b5-4218-8b3f-fc884070145e", + "metadata": {}, + "source": [ + "!pip install torch\n", + "!pip install transformers\n", + "!pip install openai" + ] + }, + { + "cell_type": "markdown", + "id": "b19cb54f-e63f-4d9b-b7ff-d18a30635cd2", + "metadata": {}, + "source": [ + "# Overview\n", + "\n", + "In this tutorial, we'll use Feast to inject documents and structured data (i.e., features) into the context of an LLM (Large Language Model) to power a RAG Application (Retrieval Augmented Generation).\n", + "\n", + "Feast solves several common issues in this flow:\n", + "1. 
**Online retrieval:** At inference time, LLMs often need access to data that isn't readily \n", + " available and needs to be precomputed from other data sources.\n", + " * Feast manages deployment to a variety of online stores (e.g. Milvus, DynamoDB, Redis, Google Cloud Datastore) and \n", + " ensures necessary features are consistently _available_ and _freshly computed_ at inference time.\n", + "2. **Vector Search:** Feast has built support for vector similarity search that is easily configured declaratively so users can focus on their application.\n", + "3. **Richer structured data:** Along with vector search, users can query standard structured fields to inject into the LLM context for better user experiences.\n", + "4. **Feature/Context and versioning:** Different teams within an organization are often unable to reuse \n", + " data across projects and services, resulting in duplicate application logic. Models have data dependencies that need \n", + " to be versioned, for example when running A/B tests on model/prompt versions.\n", + " * Feast enables discovery of and collaboration on previously used documents, features, and enables versioning of sets of \n", + " data.\n", + "\n", + "We will:\n", + "1. Deploy a local feature store with a **Parquet file offline store** and **Milvus online store**.\n", + "2. Write/materialize the data (i.e., feature values) from the offline store (a parquet file) into the online store (Milvus).\n", + "3. Serve the features using the Feast SDK\n", + "4. Inject the document into the LLM's context to answer questions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "425cf2f7-70b5-423c-a4f2-f470d8638135", + "metadata": {}, + "outputs": [], + "source": [ + "%%sh\n", + "pip install feast -U -q\n", + "echo \"Please restart your runtime now (Runtime -> Restart runtime). 
This ensures that the correct dependencies are loaded.\"" + ] + }, + { + "cell_type": "markdown", + "id": "db162bb9-e262-4958-990d-fd8f3f1f1249", + "metadata": {}, + "source": [ + "**Reminder**: Please restart your runtime after installing Feast (Runtime -> Restart runtime). This ensures that the correct dependencies are loaded." + ] + }, + { + "cell_type": "markdown", + "id": "a25cf84f-c255-4bb3-a3d7-e5512c1ba10d", + "metadata": {}, + "source": [ + "## Step 2: Create a feature repository\n", + "\n", + "A feature repository is a directory that contains the configuration of the feature store and individual features. This configuration is written as code (Python/YAML) and it's highly recommended that teams track it centrally using git. See [Feature Repository](https://docs.feast.dev/reference/feature-repository) for a detailed explanation of feature repositories.\n", + "\n", + "The easiest way to create a new feature repository is to use the `feast init` command. For this demo, you **do not** need to initialize a feast repo.\n", + "\n", + "\n", + "### Demo data scenario \n", + "- We have data from Wikipedia about states that we have embedded into sentence embeddings to be used for vector retrieval in a RAG application.\n", + "- We want to generate predictions for driver satisfaction for the rest of the users so we can reach out to potentially dissatisfied users." + ] + }, + { + "cell_type": "raw", + "id": "61dfdc9d8732d5a6", + "metadata": {}, + "source": [ + "!feast init feature_repo" + ] + }, + { + "cell_type": "markdown", + "id": "c969b62f-4f58-49ed-ae23-ace1916de0c0", + "metadata": {}, + "source": [ + "### Step 2a: Inspecting the feature repository\n", + "\n", + "Let's take a look at the demo repo itself. 
It breaks down into\n", + "\n", + "\n", + "* `data/` contains raw demo parquet data\n", + "* `example_repo.py` contains demo feature definitions\n", + "* `feature_store.yaml` contains a demo setup configuring where data sources are\n", + "* `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features.\n", + " * You can run this with `python test_workflow.py`." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "5d531836-5981-4a34-9367-51b09af18a8a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/farceo/dev/feast/examples/rag/feature_repo\n", + "__init__.py \u001b[1m\u001b[36mdata\u001b[m\u001b[m feature_store.yaml test_milvus.py\n", + "\u001b[1m\u001b[36m__pycache__\u001b[m\u001b[m example_repo.py milvus_demo.db test_workflow.py\n", + "\n", + "./__pycache__:\n", + "example_repo.cpython-311.pyc\n", + "\n", + "./data:\n", + "city_wikipedia_summaries_with_embeddings.parquet\n", + "online_store.db\n", + "registry.db\n" + ] + } + ], + "source": [ + "%cd feature_repo/\n", + "!ls -R" + ] + }, + { + "cell_type": "markdown", + "id": "d14a8073-5030-4d35-9c96-f5360aeaf39f", + "metadata": {}, + "source": [ + "### Step 2b: Inspecting the project configuration\n", + "Let's inspect the setup of the project in `feature_store.yaml`. \n", + "\n", + "The key line defining the overall architecture of the feature store is the **provider**. \n", + "\n", + "The provider value sets default offline and online stores. \n", + "* The offline store provides the compute layer to process historical data (for generating training data & feature \n", + " values for serving). 
\n", + "* The online store is a low latency store of the latest feature values (for powering real-time inference).\n", + "\n", + "Valid values for `provider` in `feature_store.yaml` are:\n", + "\n", + "* local: use file source with Milvus Lite\n", + "* gcp: use BigQuery/Snowflake with Google Cloud Datastore/Redis\n", + "* aws: use Redshift/Snowflake with DynamoDB/Redis\n", + "\n", + "Note that there are many other offline / online stores Feast works with, including Azure, Hive, Trino, and PostgreSQL via community plugins. See https://docs.feast.dev/roadmap for all supported connectors.\n", + "\n", + "A custom setup can also be made by following [Customizing Feast](https://docs.feast.dev/v/master/how-to-guides/customizing-feast)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "14c830ef-f5a4-4867-ad5c-87e709df7057", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[94mproject\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mrag\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mprovider\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mlocal\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mregistry\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/registry.db\u001b[37m\u001b[39;49;00m\n", + "\u001b[94monline_store\u001b[39;49;00m:\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mtype\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mmilvus\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mpath\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mdata/online_store.db\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mvector_enabled\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mtrue\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94membedding_dim\u001b[39;49;00m:\u001b[37m \u001b[39;49;00m384\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mindex_type\u001b[39;49;00m:\u001b[37m 
\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33mIVF_FLAT\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m\u001b[39;49;00m\n", + "\u001b[94moffline_store\u001b[39;49;00m:\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mtype\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mfile\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mentity_key_serialization_version\u001b[39;49;00m:\u001b[37m \u001b[39;49;00m3\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m# By default, no_auth for authentication and authorization, other possible values kubernetes and oidc. Refer the documentation for more details.\u001b[39;49;00m\u001b[37m\u001b[39;49;00m\n", + "\u001b[94mauth\u001b[39;49;00m:\u001b[37m\u001b[39;49;00m\n", + "\u001b[37m \u001b[39;49;00m\u001b[94mtype\u001b[39;49;00m:\u001b[37m \u001b[39;49;00mno_auth\u001b[37m\u001b[39;49;00m\n" + ] + } + ], + "source": [ + "!pygmentize feature_store.yaml" + ] + }, + { + "cell_type": "markdown", + "id": "5ce80d1a-05d3-434d-bd1e-1ade8abd1f9f", + "metadata": {}, + "source": [ + "### Inspecting the raw data\n", + "\n", + "The raw feature data we have in this demo is stored in a local parquet file. The dataset contains Wikipedia summaries of different cities." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "788a27ff-16a4-4b23-8c1c-ba27fd918aa5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "embedding length = 384\n" + ] + } + ], + "source": [ + "import pandas as pd \n", + "\n", + "df = pd.read_parquet(\"./data/city_wikipedia_summaries_with_embeddings.parquet\")\n", + "df['vector'] = df['vector'].apply(lambda x: x.tolist())\n", + "embedding_length = len(df['vector'][0])\n", + "print(f'embedding length = {embedding_length}')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e433178c-51e8-49a7-884c-c9573082ad6d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
iditem_idevent_timestampstatewiki_summarysentence_chunksvector
0002025-01-09 13:36:59.280589New York, New YorkNew York, often called New York City or simply...New York, often called New York City or simply...[0.1465730518102646, -0.07317650318145752, 0.0...
1112025-01-09 13:36:59.280589New York, New YorkNew York, often called New York City or simply...The city comprises five boroughs, each of whic...[0.05218901485204697, -0.08449874818325043, 0....
2222025-01-09 13:36:59.280589New York, New YorkNew York, often called New York City or simply...New York is a global center of finance and com...[0.06769222766160965, -0.07371102273464203, -0...
3332025-01-09 13:36:59.280589New York, New YorkNew York, often called New York City or simply...New York City is the epicenter of the world's ...[0.12095861881971359, -0.04279915615916252, 0....
4442025-01-09 13:36:59.280589New York, New YorkNew York, often called New York City or simply...With an estimated population in 2022 of 8,335,...[0.17943550646305084, -0.09458263963460922, 0....
\n", + "
" + ], + "text/plain": [ + " id item_id event_timestamp state \\\n", + "0 0 0 2025-01-09 13:36:59.280589 New York, New York \n", + "1 1 1 2025-01-09 13:36:59.280589 New York, New York \n", + "2 2 2 2025-01-09 13:36:59.280589 New York, New York \n", + "3 3 3 2025-01-09 13:36:59.280589 New York, New York \n", + "4 4 4 2025-01-09 13:36:59.280589 New York, New York \n", + "\n", + " wiki_summary \\\n", + "0 New York, often called New York City or simply... \n", + "1 New York, often called New York City or simply... \n", + "2 New York, often called New York City or simply... \n", + "3 New York, often called New York City or simply... \n", + "4 New York, often called New York City or simply... \n", + "\n", + " sentence_chunks \\\n", + "0 New York, often called New York City or simply... \n", + "1 The city comprises five boroughs, each of whic... \n", + "2 New York is a global center of finance and com... \n", + "3 New York City is the epicenter of the world's ... \n", + "4 With an estimated population in 2022 of 8,335,... \n", + "\n", + " vector \n", + "0 [0.1465730518102646, -0.07317650318145752, 0.0... \n", + "1 [0.05218901485204697, -0.08449874818325043, 0.... \n", + "2 [0.06769222766160965, -0.07371102273464203, -0... \n", + "3 [0.12095861881971359, -0.04279915615916252, 0.... \n", + "4 [0.17943550646305084, -0.09458263963460922, 0.... " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import display\n", + "\n", + "display(df.head())" + ] + }, + { + "cell_type": "markdown", + "id": "ec07d38d-d0ff-4dc3-b041-3bf24de9e7e3", + "metadata": {}, + "source": [ + "## Step 3: Register feature definitions and deploy your feature store\n", + "\n", + "`feast apply` scans python files in the current directory for feature/entity definitions and deploys infrastructure according to `feature_store.yaml`." 
+ ] + }, + { + "cell_type": "markdown", + "id": "79409ca9-7552-4aa5-b95b-29f836a0d3a5", + "metadata": {}, + "source": [ + "### Step 3a: Inspecting feature definitions\n", + "Let's inspect what `example_repo.py` looks like:\n", + "\n", + "```python\n", + "from datetime import timedelta\n", + "\n", + "from feast import (\n", + " FeatureView,\n", + " Field,\n", + " FileSource,\n", + ")\n", + "from feast.data_format import ParquetFormat\n", + "from feast.types import Float32, Array, String, ValueType\n", + "from feast import Entity\n", + "\n", + "item = Entity(\n", + " name=\"item_id\",\n", + " description=\"Item ID\",\n", + " value_type=ValueType.INT64,\n", + ")\n", + "\n", + "parquet_file_path = \"./data/city_wikipedia_summaries_with_embeddings.parquet\"\n", + "\n", + "source = FileSource(\n", + " file_format=ParquetFormat(),\n", + " path=parquet_file_path,\n", + " timestamp_field=\"event_timestamp\",\n", + ")\n", + "\n", + "city_embeddings_feature_view = FeatureView(\n", + " name=\"city_embeddings\",\n", + " entities=[item],\n", + " schema=[\n", + " Field(\n", + " name=\"vector\",\n", + " dtype=Array(Float32),\n", + " vector_index=True,\n", + " vector_search_metric=\"COSINE\",\n", + " ),\n", + " Field(name=\"state\", dtype=String),\n", + " Field(name=\"sentence_chunks\", dtype=String),\n", + " Field(name=\"wiki_summary\", dtype=String),\n", + " ],\n", + " source=source,\n", + " ttl=timedelta(hours=2),\n", + ")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "76634929-c84a-4301-93d3-88292335bde0", + "metadata": {}, + "source": [ + "### Step 3b: Applying feature definitions\n", + "Now we run `feast apply` to register the feature views and entities defined in `example_repo.py`, and set up Milvus online store collections. Note that we had previously specified Milvus as the online store in `feature_store.yaml`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "837e1530-e863-4e5f-b206-b6b4b3ca2aa2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/farceo/dev/feast/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pymilvus/client/__init__.py:6: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\n", + " from pkg_resources import DistributionNotFound, get_distribution\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pkg_resources/__init__.py:3142: DeprecationWarning: Deprecated call to `pkg_resources.declare_namespace('sphinxcontrib')`.\n", + "Implementing implicit namespace packages (as specified in PEP 420) is preferred to `pkg_resources.declare_namespace`. See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages\n", + " declare_namespace(pkg)\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/environs/__init__.py:58: DeprecationWarning: The '__version_info__' attribute is deprecated and will be removed in in a future version. Use feature detection or 'packaging.Version(importlib.metadata.version(\"marshmallow\")).release' instead.\n", + " _SUPPORTS_LOAD_DEFAULT = ma.__version_info__ >= (3, 13)\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"MilvusOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\n", + " warnings.warn(\n", + "No project found in the repository. 
Using project name rag defined in feature_store.yaml\n", + "Applying changes for project rag\n", + "/Users/farceo/dev/feast/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity 'item_id'.\n", + " entity = cls(\n", + "/Users/farceo/dev/feast/sdk/python/feast/entity.py:173: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " entity = cls(\n", + "Connecting to Milvus in local mode using /Users/farceo/dev/feast/examples/rag/feature_repo/data/online_store.db\n", + "01/29/2025 05:11:55 PM pymilvus.milvus_client.milvus_client DEBUG: Created new connection using: 9fe4c5dfbe434f1babbf9f2a0970fb87\n", + "Deploying infrastructure for \u001b[1m\u001b[32mcity_embeddings\u001b[0m\n" + ] + } + ], + "source": [ + "! feast apply" + ] + }, + { + "cell_type": "markdown", + "id": "ad7654cc-865c-4bb4-8c0f-d3086c5d9f7e", + "metadata": {}, + "source": [ + "## Step 5: Load features into your online store" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "34ded931-3de0-4951-aead-1e8ca1679cbe", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/farceo/dev/feast/sdk/python/feast/feature_view.py:48: DeprecationWarning: Entity value_type will be mandatory in the next release. Please specify a value_type for entity '__dummy'.\n", + " DUMMY_ENTITY = Entity(\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pymilvus/client/__init__.py:6: DeprecationWarning: pkg_resources is deprecated as an API. 
See https://setuptools.pypa.io/en/latest/pkg_resources.html\n", + " from pkg_resources import DistributionNotFound, get_distribution\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pkg_resources/__init__.py:3142: DeprecationWarning: Deprecated call to `pkg_resources.declare_namespace('sphinxcontrib')`.\n", + "Implementing implicit namespace packages (as specified in PEP 420) is preferred to `pkg_resources.declare_namespace`. See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages\n", + " declare_namespace(pkg)\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/environs/__init__.py:58: DeprecationWarning: The '__version_info__' attribute is deprecated and will be removed in in a future version. Use feature detection or 'packaging.Version(importlib.metadata.version(\"marshmallow\")).release' instead.\n", + " _SUPPORTS_LOAD_DEFAULT = ma.__version_info__ >= (3, 13)\n", + "/Users/farceo/dev/feast/.venv/lib/python3.11/site-packages/pydantic/_internal/_fields.py:192: UserWarning: Field name \"vector_enabled\" in \"MilvusOnlineStoreConfig\" shadows an attribute in parent \"VectorStoreConfig\"\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "from datetime import datetime\n", + "from feast import FeatureStore\n", + "\n", + "store = FeatureStore(repo_path=\".\")" + ] + }, + { + "cell_type": "markdown", + "id": "4c784d77-e96c-455c-9f1f-9183bab58d72", + "metadata": {}, + "source": [ + "### Step 5a: Using `materialize_incremental`\n", + "\n", + "We now serialize the latest values of features since the beginning of time to prepare for serving. Note, `materialize_incremental` serializes all new features since the last `materialize` call, or since the time provided minus the `ttl` timedelta. In this case, this will be `CURRENT_TIME - 1 day` (`ttl` was set on the `FeatureView` instances in [feature_repo/feature_repo/example_repo.py](feature_repo/feature_repo/example_repo.py)). 
\n", + "\n", + "```bash\n", + "CURRENT_TIME=$(date -u +\"%Y-%m-%dT%H:%M:%S\")\n", + "feast materialize-incremental $CURRENT_TIME\n", + "```\n", + "\n", + "An alternative to using the CLI command is to use Python:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a2655725-5cc4-4f07-ade4-dc5e705eed05", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Connecting to Milvus in local mode using data/online_store.db\n" + ] + } + ], + "source": [ + "store.write_to_online_store(feature_view_name='city_embeddings', df=df)" + ] + }, + { + "cell_type": "markdown", + "id": "b836e5b1-1fe2-4e9d-8c9a-bdc91da8254e", + "metadata": {}, + "source": [ + "### Step 5b: Inspect materialized features\n", + "\n", + "Note that now there are `online_store.db` and `registry.db`, which store the materialized features and schema information, respectively." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1307b1aa-fecf-4adf-aafc-f65d89ca735c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
item_id_pkcreated_tsevent_tsitem_idsentence_chunksstatevectorwiki_summary
00100000002000000070000006974656d5f696404000000...017364478192805890New York, often called New York City or simply...New York, New York0.146573New York, often called New York City or simply...
10100000002000000070000006974656d5f696404000000...017364478192805890New York, often called New York City or simply...New York, New York-0.073177New York, often called New York City or simply...
20100000002000000070000006974656d5f696404000000...017364478192805890New York, often called New York City or simply...New York, New York0.052114New York, often called New York City or simply...
30100000002000000070000006974656d5f696404000000...017364478192805890New York, often called New York City or simply...New York, New York0.033187New York, often called New York City or simply...
40100000002000000070000006974656d5f696404000000...017364478192805890New York, often called New York City or simply...New York, New York0.012013New York, often called New York City or simply...
\n", + "
" + ], + "text/plain": [ + " item_id_pk created_ts \\\n", + "0 0100000002000000070000006974656d5f696404000000... 0 \n", + "1 0100000002000000070000006974656d5f696404000000... 0 \n", + "2 0100000002000000070000006974656d5f696404000000... 0 \n", + "3 0100000002000000070000006974656d5f696404000000... 0 \n", + "4 0100000002000000070000006974656d5f696404000000... 0 \n", + "\n", + " event_ts item_id \\\n", + "0 1736447819280589 0 \n", + "1 1736447819280589 0 \n", + "2 1736447819280589 0 \n", + "3 1736447819280589 0 \n", + "4 1736447819280589 0 \n", + "\n", + " sentence_chunks state \\\n", + "0 New York, often called New York City or simply... New York, New York \n", + "1 New York, often called New York City or simply... New York, New York \n", + "2 New York, often called New York City or simply... New York, New York \n", + "3 New York, often called New York City or simply... New York, New York \n", + "4 New York, often called New York City or simply... New York, New York \n", + "\n", + " vector wiki_summary \n", + "0 0.146573 New York, often called New York City or simply... \n", + "1 -0.073177 New York, often called New York City or simply... \n", + "2 0.052114 New York, often called New York City or simply... \n", + "3 0.033187 New York, often called New York City or simply... \n", + "4 0.012013 New York, often called New York City or simply... 
" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pymilvus_client = store._provider._online_store._connect(store.config)\n", + "COLLECTION_NAME = pymilvus_client.list_collections()[0]\n", + "\n", + "milvus_query_result = pymilvus_client.query(\n", + " collection_name=COLLECTION_NAME,\n", + " filter=\"item_id == '0'\",\n", + ")\n", + "pd.DataFrame(milvus_query_result[0]).head()" + ] + }, + { + "cell_type": "markdown", + "id": "5fbf3921-e775-46b7-9915-d18c6592586f", + "metadata": {}, + "source": [ + "### Quick note on entity keys\n", + "Note from the above command that the online store indexes by `entity_key`. \n", + "\n", + "[Entity keys](https://docs.feast.dev/getting-started/concepts/entity#entity-key) include a list of all entities needed (e.g. all relevant primary keys) to generate the feature vector. In this case, this is a serialized version of the `driver_id`. We use this later to fetch all features for a given driver at inference time." + ] + }, + { + "cell_type": "markdown", + "id": "516f6e4a-2d37-4428-8dba-81620a65c2ad", + "metadata": {}, + "source": [ + "## Step 6: Embedding a query using PyTorch and Sentence Transformers" + ] + }, + { + "cell_type": "markdown", + "id": "66b4e67d-6f94-4532-b107-abc4c0f002f1", + "metadata": {}, + "source": [ + "During inference (e.g., during when a user submits a chat message) we need to embed the input text. This can be thought of as a feature transformation of the input data. In this example, we'll do this with a small Sentence Transformer from Hugging Face." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "62da57be-316d-46ee-b8a7-bac54a7faf55", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn.functional as F\n", + "from feast import FeatureStore\n", + "from pymilvus import MilvusClient, DataType, FieldSchema\n", + "from transformers import AutoTokenizer, AutoModel\n", + "from example_repo import city_embeddings_feature_view, item\n", + "\n", + "TOKENIZER = \"sentence-transformers/all-MiniLM-L6-v2\"\n", + "MODEL = \"sentence-transformers/all-MiniLM-L6-v2\"\n", + "\n", + "def mean_pooling(model_output, attention_mask):\n", + " token_embeddings = model_output[\n", + " 0\n", + " ] # First element of model_output contains all token embeddings\n", + " input_mask_expanded = (\n", + " attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n", + " )\n", + " return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(\n", + " input_mask_expanded.sum(1), min=1e-9\n", + " )\n", + "\n", + "def run_model(sentences, tokenizer, model):\n", + " encoded_input = tokenizer(\n", + " sentences, padding=True, truncation=True, return_tensors=\"pt\"\n", + " )\n", + " # Compute token embeddings\n", + " with torch.no_grad():\n", + " model_output = model(**encoded_input)\n", + "\n", + " sentence_embeddings = mean_pooling(model_output, encoded_input[\"attention_mask\"])\n", + " sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)\n", + " return sentence_embeddings" + ] + }, + { + "cell_type": "markdown", + "id": "67868cdf-04e9-4086-bed8-050e4902ed71", + "metadata": {}, + "source": [ + "## Step 7: Fetching real-time vectors and data for online inference" + ] + }, + { + "cell_type": "markdown", + "id": "29b9ae94-7daa-4d56-8bca-9339d09cd1ed", + "metadata": {}, + "source": [ + "At inference time, we need to use vector similarity search through the document embeddings from the online feature store using `retrieve_online_documents_v2()` while passing the 
embedded query. These feature vectors can then be fed into the context of the LLM." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0c76a526-35dc-4af5-bd46-d181e3a8c23a", + "metadata": {}, + "outputs": [], + "source": [ + "question = \"Which city has the largest population in New York?\"\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(TOKENIZER)\n", + "model = AutoModel.from_pretrained(MODEL)\n", + "query_embedding = run_model(question, tokenizer, model)\n", + "query = query_embedding.detach().cpu().numpy().tolist()[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "d3099708-409b-4d9e-b1d6-8ad86de6fde2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
vectoritem_idstatesentence_chunkswiki_summarydistance
0[0.15548758208751678, -0.08017724752426147, -0...0New York, New YorkNew York, often called New York City or simply...New York, often called New York City or simply...0.743023
\n", + "
" + ], + "text/plain": [ + " vector item_id \\\n", + "0 [0.15548758208751678, -0.08017724752426147, -0... 0 \n", + "\n", + " state sentence_chunks \\\n", + "0 New York, New York New York, often called New York City or simply... \n", + "\n", + " wiki_summary distance \n", + "0 New York, often called New York City or simply... 0.743023 " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import display\n", + "\n", + "# Retrieve top k documents\n", + "context_data = store.retrieve_online_documents_v2(\n", + " features=[\n", + " \"city_embeddings:vector\",\n", + " \"city_embeddings:item_id\",\n", + " \"city_embeddings:state\",\n", + " \"city_embeddings:sentence_chunks\",\n", + " \"city_embeddings:wiki_summary\",\n", + " ],\n", + " query=query,\n", + " top_k=3,\n", + " distance_metric='COSINE',\n", + ").to_df()\n", + "display(context_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "0d56cf77-b09c-4ed7-b26e-3950d351953e", + "metadata": {}, + "outputs": [], + "source": [ + "def format_documents(context_df):\n", + " output_context = \"\"\n", + " unique_documents = context_df.drop_duplicates().apply(\n", + " lambda x: \"City & State = {\" + x['state'] +\"}\\nSummary = {\" + x['wiki_summary'].strip()+\"}\",\n", + " axis=1,\n", + " )\n", + " for i, document_text in enumerate(unique_documents):\n", + " output_context+= f\"****START DOCUMENT {i}****\\n{document_text.strip()}\\n****END DOCUMENT {i}****\"\n", + " return output_context" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "595adf60-54bd-4ec7-966e-5ac08f643f25", + "metadata": {}, + "outputs": [], + "source": [ + "RAG_CONTEXT = format_documents(context_data[['state', 'wiki_summary']])" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "3978561a-79a0-48bb-86ca-d81293a0e618", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "****START DOCUMENT 0****\n", + 
"City & State = {New York, New York}\n", + "Summary = {New York, often called New York City or simply NYC, is the most populous city in the United States, located at the southern tip of New York State on one of the world's largest natural harbors. The city comprises five boroughs, each of which is coextensive with a respective county. New York is a global center of finance and commerce, culture and technology, entertainment and media, academics and scientific output, and the arts and fashion, and, as home to the headquarters of the United Nations, is an important center for international diplomacy. New York City is the epicenter of the world's principal metropolitan economy.\n", + "With an estimated population in 2022 of 8,335,897 distributed over 300.46 square miles (778.2 km2), the city is the most densely populated major city in the United States. New York has more than double the population of Los Angeles, the nation's second-most populous city. New York is the geographical and demographic center of both the Northeast megalopolis and the New York metropolitan area, the largest metropolitan area in the U.S. by both population and urban area. With more than 20.1 million people in its metropolitan statistical area and 23.5 million in its combined statistical area as of 2020, New York City is one of the world's most populous megacities. The city and its metropolitan area are the premier gateway for legal immigration to the United States. As many as 800 languages are spoken in New York, making it the most linguistically diverse city in the world. In 2021, the city was home to nearly 3.1 million residents born outside the U.S., the largest foreign-born population of any city in the world.\n", + "New York City traces its origins to Fort Amsterdam and a trading post founded on the southern tip of Manhattan Island by Dutch colonists in approximately 1624. The settlement was named New Amsterdam (Dutch: Nieuw Amsterdam) in 1626 and was chartered as a city in 1653. 
The city came under English control in 1664 and was temporarily renamed New York after King Charles II granted the lands to his brother, the Duke of York. before being permanently renamed New York in November 1674. New York City was the capital of the United States from 1785 until 1790. The modern city was formed by the 1898 consolidation of its five boroughs: Manhattan, Brooklyn, Queens, The Bronx, and Staten Island, and has been the largest U.S. city ever since.\n", + "Anchored by Wall Street in the Financial District of Lower Manhattan, New York City has been called both the world's premier financial and fintech center and the most economically powerful city in the world. As of 2022, the New York metropolitan area is the largest metropolitan economy in the world with a gross metropolitan product of over US$2.16 trillion. If the New York metropolitan area were its own country, it would have the tenth-largest economy in the world. The city is home to the world's two largest stock exchanges by market capitalization of their listed companies: the New York Stock Exchange and Nasdaq. New York City is an established safe haven for global investors. As of 2023, New York City is the most expensive city in the world for expatriates to live. New York City is home to the highest number of billionaires, individuals of ultra-high net worth (greater than US$30 million), and millionaires of any city in the world.}\n", + "****END DOCUMENT 0****\n" + ] + } + ], + "source": [ + "print(RAG_CONTEXT)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "09cad16f-4078-42de-80ee-2672dae5608a", + "metadata": {}, + "outputs": [], + "source": [ + "FULL_PROMPT = f\"\"\"\n", + "You are an assistant for answering questions about states. You will be provided documentation from Wikipedia. 
Provide a conversational answer.\n", + "If you don't know the answer, just say \"I do not know.\" Don't make up an answer.\n", + "\n", + "Here are document(s) you should use when answer the users question:\n", + "{RAG_CONTEXT}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "7bb4a000-8ef3-4006-9c61-7d76fa865d28", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from openai import OpenAI\n", + "\n", + "client = OpenAI(\n", + " api_key=os.environ.get(\"OPENAI_API_KEY\"),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "da814147-9c78-4906-a84a-78fc88c2fc49", + "metadata": {}, + "outputs": [], + "source": [ + "response = client.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": FULL_PROMPT},\n", + " {\"role\": \"user\", \"content\": question}\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "68cbd8df-af73-4dbe-97a9-f3cd89f36f3d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The largest city in New York is New York City, often referred to as NYC. 
It is the most populous city in the United States, with an estimated population of 8,335,897 in 2022.\n" + ] + } + ], + "source": [ + "print('\\n'.join([c.message.content for c in response.choices]))" + ] + }, + { + "cell_type": "markdown", + "id": "d4f01627-533b-49b0-9814-292360d064c6", + "metadata": {}, + "source": [ + "# End" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/go.mod b/go.mod index 61063a0cdaf..05305c1e6c1 100644 --- a/go.mod +++ b/go.mod @@ -1,50 +1,54 @@ module github.com/feast-dev/feast -go 1.17 +go 1.22.0 -replace github.com/go-python/gopy v0.4.4 => github.com/feast-dev/gopy v0.4.1-0.20220714211711-252048177d85 +toolchain go1.22.5 require ( - github.com/apache/arrow/go/v8 v8.0.0 + github.com/apache/arrow/go/v17 v17.0.0 github.com/ghodss/yaml v1.0.0 - github.com/go-redis/redis/v8 v8.11.4 - github.com/golang/protobuf v1.5.3 - github.com/google/uuid v1.3.0 - github.com/mattn/go-sqlite3 v1.14.12 + github.com/golang/protobuf v1.5.4 + github.com/google/uuid v1.6.0 + github.com/mattn/go-sqlite3 v1.14.23 github.com/pkg/errors v0.9.1 + github.com/redis/go-redis/v9 v9.6.1 + github.com/rs/zerolog v1.33.0 github.com/spaolacci/murmur3 v1.1.0 - github.com/stretchr/testify v1.7.0 - google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.33.0 + github.com/stretchr/testify v1.9.0 + google.golang.org/grpc v1.67.0 + google.golang.org/protobuf v1.34.2 ) require ( github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/thrift v0.15.0 // indirect - 
github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect + github.com/apache/thrift v0.21.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/goccy/go-json v0.9.6 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/flatbuffers v2.0.6+incompatible // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/klauspost/asmfmt v1.3.2 // indirect - github.com/klauspost/compress v1.15.1 // indirect - github.com/klauspost/cpuid/v2 v2.0.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect - github.com/pierrec/lz4/v4 v4.1.14 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect - golang.org/x/exp v0.0.0-20220407100705-7b9b53b0aca4 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + golang.org/x/exp 
v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 83bbc041c5a..41abd905c44 100644 --- a/go.sum +++ b/go.sum @@ -1,1910 +1,107 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.4.0/go.mod 
h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= 
-cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= 
-cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod 
h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod 
h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= 
-cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod 
h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= -cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/container 
v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/dataform 
v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.2.0/go.mod 
h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= 
-cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= 
-cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod 
h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod 
h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= 
-cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= 
-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity 
v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod 
h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= 
-cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= 
-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/security v1.13.0/go.mod 
h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod 
h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= 
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod 
h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= -cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision v1.2.0/go.mod 
h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod 
h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= -github.com/apache/arrow/go/v8 v8.0.0 h1:mG1dDlq8aQO4a/PB00T9H19Ga2imvqoFPHI5cykpibs= -github.com/apache/arrow/go/v8 v8.0.0/go.mod h1:63co72EKYQT9WKr8Y1Yconk4dysC0t79wNDauYO1ZGg= -github.com/apache/thrift 
v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= -github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/apache/arrow/go/v17 v17.0.0 h1:RRR2bdqKcdbss9Gxy2NS/hK8i4LDMh23L6BbkN5+F54= +github.com/apache/arrow/go/v17 v17.0.0/go.mod h1:jR7QHkODl15PfYyjM2nU+yTLScZ/qfj7OSUZmJ8putc= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane 
v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= -github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/goccy/go-json v0.9.6 h1:5/4CtRQdtsX0sal8fdVhTaiMN01Ri8BExZZ8iRmHQ6E= -github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json 
v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= 
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v2.0.5+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v2.0.6+incompatible h1:XHFReMv7nFFusa+CEokzWbzaYocKXI6C7hdU5Kgh9Lw= -github.com/google/flatbuffers v2.0.6+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod 
h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.1/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9 
h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= 
-github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= -github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= 
-github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= -github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang 
v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= 
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.1/go.mod h1:8VHV24/3AZLn3b6Mlp/KuC33LWH687Wq6EnziEB+rsA= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= 
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps= -golang.org/x/exp v0.0.0-20220407100705-7b9b53b0aca4 h1:K3x+yU+fbot38x5bQbU2QqUAVyYLEktdNH2GxZLnM3U= -golang.org/x/exp v0.0.0-20220407100705-7b9b53b0aca4/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod 
h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
-golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 
v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text 
v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools 
v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= 
-gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= 
-google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
-google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto 
v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= 
-google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc 
v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod 
h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 
v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.1.1/go.mod 
h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/go/README.md b/go/README.md index 0bca470919f..d18b75815fc 100644 --- a/go/README.md +++ b/go/README.md @@ -1,109 +1,12 @@ -This directory contains the Go logic that's executed by the `EmbeddedOnlineFeatureServer` from Python. - -## Building and Linking -[gopy](https://github.com/go-python/gopy) generates (and compiles) a CPython extension module from a Go package. That's what we're using here, as visible in [setup.py](../setup.py). - -Under the hood, gopy invokes `go build`, and then templates `cgo` stubs for the Go module that exposes the public functions from the Go module as C functions. -For our project, this stuff can be found at `sdk/python/feast/embedded_go/lib/embedded.go` & `sdk/python/feast/embedded_go/lib/embedded_go.h` after running `make compile-go-lib`. - -## Arrow memory management -Understanding this is the trickiest part of this integration. 
- -At a high level, when using the Python<>Go integration, the Python layer exports request data into an [Arrow Record batch](https://arrow.apache.org/docs/python/data.html) which is transferred to Go using Arrow's zero copy mechanism. -Similarly, the Go layer converts feature values read from the online store into a Record Batch that's exported to Python using the same mechanics. - -The first thing to note is that from the Python perspective, all the export logic assumes that we're exporting to & importing from C, not Go. This is because pyarrow only interops with C, and the fact we're using Go is an implementation detail not relevant to the Python layer. - -### Export Entities & Request data from Python to Go -The code exporting to C is this, in [online_feature_service.py](../sdk/python/feast/embedded_go/online_features_service.py) -``` -( - entities_c_schema, - entities_ptr_schema, - entities_c_array, - entities_ptr_array, -) = allocate_schema_and_array() -( - req_data_c_schema, - req_data_ptr_schema, - req_data_c_array, - req_data_ptr_array, -) = allocate_schema_and_array() - -batch, schema = map_to_record_batch(entities, join_keys_types) -schema._export_to_c(entities_ptr_schema) -batch._export_to_c(entities_ptr_array) - -batch, schema = map_to_record_batch(request_data) -schema._export_to_c(req_data_ptr_schema) -batch._export_to_c(req_data_ptr_array) -``` - -Under the hood, `allocate_schema_and_array` allocates a pointer (`struct ArrowSchema*` and `struct ArrowArray*`) in native memory (i.e. the C layer) using `cffi`. -Next, the RecordBatch exports to this pointer using [`_export_to_c`](https://github.com/apache/arrow/blob/master/python/pyarrow/table.pxi#L2509), which uses [`ExportRecordBatch`](https://arrow.apache.org/docs/cpp/api/c_abi.html#_CPPv417ExportRecordBatchRK11RecordBatchP10ArrowArrayP11ArrowSchema) under the hood. 
- -As per the documentation for ExportRecordBatch: -> Status ExportRecordBatch(const RecordBatch &batch, struct ArrowArray *out, struct ArrowSchema *out_schema = NULLPTR) -> Export C++ RecordBatch using the C data interface format. -> -> The record batch is exported as if it were a struct array. The resulting ArrowArray struct keeps the record batch data and buffers alive until its release callback is called by the consumer. +[Update 10/31/2024] This Go feature server code is updated from the Expedia Group's forked Feast branch (https://github.com/EXPEbdodla/feast) on 10/22/2024. Thanks the engineers of the Expedia Groups who contributed and improved the Go feature server. -This is why `GetOnlineFeatures()` in `online_features.go` calls `record.Release()` as below: -``` -entitiesRecord, err := readArrowRecord(entities) -if err != nil { - return err -} -defer entitiesRecord.Release() -... -requestDataRecords, err := readArrowRecord(requestData) -if err != nil { - return err -} -defer requestDataRecords.Release() -``` -Additionally, we need to pass in a pair of pointers to `GetOnlineFeatures()` that are populated by the Go layer, and the resultant feature values can be passed back to Python (via the C layer) using zero-copy semantics. -That happens as follows: -``` -( - features_c_schema, - features_ptr_schema, - features_c_array, - features_ptr_array, -) = allocate_schema_and_array() - -... - -record_batch = pa.RecordBatch._import_from_c( - features_ptr_array, features_ptr_schema -) -``` - -The corresponding Go code that exports this data is: -``` -result := array.NewRecord(arrow.NewSchema(outputFields, nil), outputColumns, int64(numRows)) - -cdata.ExportArrowRecordBatch(result, - cdata.ArrayFromPtr(output.DataPtr), - cdata.SchemaFromPtr(output.SchemaPtr)) -``` - -The documentation for `ExportArrowRecordBatch` is great. 
It has this super useful caveat: - -> // The release function on the populated CArrowArray will properly decrease the reference counts, -> // and release the memory if the record has already been released. But since this must be explicitly -> // done, make sure it is released so that you do not create a memory leak. - -This implies that the reciever is on the hook for explicitly releasing this memory. - -However, we're using `_import_from_c`, which uses [`ImportRecordBatch`](https://arrow.apache.org/docs/cpp/api/c_abi.html#_CPPv417ImportRecordBatchP10ArrowArrayP11ArrowSchema), which implies that the receiver of the RecordBatch is the new owner of the data. -This is wrapped by pyarrow - and when the corresponding python object goes out of scope, it should clean up the underlying record batch. - -Another thing to note (which I'm not sure may be the source of issues) is that Arrow has the concept of [Memory Pools](https://arrow.apache.org/docs/python/api/memory.html#memory-pools). -Memory pools can be set in python as well as in Go. I *believe* that if we use the CGoArrowAllocator, that uses whatever pool C++ uses, which should be the same as the one used by PyArrow. But this should be vetted. +This directory contains the Go logic that's executed by the `EmbeddedOnlineFeatureServer` from Python. 
+## Build and Run +To build and run the Go Feature Server locally, create a feature_store.yaml file with necessary configurations and run below commands: -### References -- https://arrow.apache.org/docs/format/CDataInterface.html#memory-management -- https://arrow.apache.org/docs/python/memory.html \ No newline at end of file +```bash + go build -o feast ./go/main.go + ./feast --type=http --port=8080 +``` \ No newline at end of file diff --git a/go/embedded/online_features.go b/go/embedded/online_features.go index 3c470e4b244..3cbd47ae5b7 100644 --- a/go/embedded/online_features.go +++ b/go/embedded/online_features.go @@ -7,13 +7,16 @@ import ( "net" "os" "os/signal" + //"strings" "syscall" "time" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/cdata" - "github.com/apache/arrow/go/v8/arrow/memory" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/cdata" + "github.com/apache/arrow/go/v17/arrow/memory" "google.golang.org/grpc" "github.com/feast-dev/feast/go/internal/feast" @@ -26,6 +29,10 @@ import ( "github.com/feast-dev/feast/go/protos/feast/serving" prototypes "github.com/feast-dev/feast/go/protos/feast/types" "github.com/feast-dev/feast/go/types" + jsonlog "github.com/rs/zerolog/log" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + //grpctrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc" ) type OnlineFeatureService struct { @@ -63,6 +70,7 @@ type LoggingOptions struct { func NewOnlineFeatureService(conf *OnlineFeatureServiceConfig, transformationCallback transformation.TransformationCallback) *OnlineFeatureService { repoConfig, err := registry.NewRepoConfigFromJSON(conf.RepoPath, conf.RepoConfig) if err != nil { + jsonlog.Error().Stack().Err(err).Msg("Failed to convert to RepoConfig") return 
&OnlineFeatureService{ err: err, } @@ -70,6 +78,7 @@ func NewOnlineFeatureService(conf *OnlineFeatureServiceConfig, transformationCal fs, err := feast.NewFeatureStore(repoConfig, transformationCallback) if err != nil { + jsonlog.Error().Stack().Err(err).Msg("Failed to create NewFeatureStore") return &OnlineFeatureService{ err: err, } @@ -205,7 +214,7 @@ func (s *OnlineFeatureService) GetOnlineFeatures( outputFields := make([]arrow.Field, 0) outputColumns := make([]arrow.Array, 0) - pool := memory.NewCgoArrowAllocator() + pool := memory.NewGoAllocator() for _, featureVector := range resp { outputFields = append(outputFields, arrow.Field{ @@ -254,7 +263,7 @@ func (s *OnlineFeatureService) GetOnlineFeatures( // StartGprcServer starts gRPC server with disabled feature logging and blocks the thread func (s *OnlineFeatureService) StartGprcServer(host string, port int) error { - return s.StartGprcServerWithLogging(host, port, nil, LoggingOptions{}) + return s.StartGrpcServerWithLogging(host, port, nil, LoggingOptions{}) } // StartGprcServerWithLoggingDefaultOpts starts gRPC server with enabled feature logging but default configuration for logging @@ -266,7 +275,7 @@ func (s *OnlineFeatureService) StartGprcServerWithLoggingDefaultOpts(host string WriteInterval: logging.DefaultOptions.WriteInterval, FlushInterval: logging.DefaultOptions.FlushInterval, } - return s.StartGprcServerWithLogging(host, port, writeLoggedFeaturesCallback, defaultOpts) + return s.StartGrpcServerWithLogging(host, port, writeLoggedFeaturesCallback, defaultOpts) } func (s *OnlineFeatureService) constructLoggingService(writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts LoggingOptions) (*logging.LoggingService, error) { @@ -290,9 +299,14 @@ func (s *OnlineFeatureService) constructLoggingService(writeLoggedFeaturesCallba return loggingService, nil } -// StartGprcServerWithLogging starts gRPC server with enabled feature logging +// StartGrpcServerWithLogging starts gRPC server with 
enabled feature logging // Caller of this function must provide Python callback to flush buffered logs as well as logging configuration (loggingOpts) -func (s *OnlineFeatureService) StartGprcServerWithLogging(host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts LoggingOptions) error { +func (s *OnlineFeatureService) StartGrpcServerWithLogging(host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts LoggingOptions) error { + //if strings.ToLower(os.Getenv("ENABLE_DATADOG_TRACING")) == "true" { + // tracer.Start(tracer.WithRuntimeMetrics()) + // defer tracer.Stop() + //} + loggingService, err := s.constructLoggingService(writeLoggedFeaturesCallback, loggingOpts) if err != nil { return err @@ -304,8 +318,12 @@ func (s *OnlineFeatureService) StartGprcServerWithLogging(host string, port int, return err } + //grpcServer := grpc.NewServer(grpc.UnaryInterceptor(grpctrace.UnaryServerInterceptor())) grpcServer := grpc.NewServer() + serving.RegisterServingServiceServer(grpcServer, ser) + healthService := health.NewServer() + grpc_health_v1.RegisterHealthServer(grpcServer, healthService) go func() { // As soon as these signals are received from OS, try to gracefully stop the gRPC server diff --git a/go/infra/docker/feature-server/Dockerfile b/go/infra/docker/feature-server/Dockerfile new file mode 100644 index 00000000000..cf63bb45594 --- /dev/null +++ b/go/infra/docker/feature-server/Dockerfile @@ -0,0 +1,31 @@ +FROM golang:1.22.5 + +# Update the package list and install the ca-certificates package +RUN apt-get update && apt-get install -y ca-certificates +RUN apt install -y protobuf-compiler + +RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.31.0 +RUN go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 + +# Set the current working directory inside the container +WORKDIR /app + +# Copy the source code into the container +COPY go/ ./go/ +COPY go.mod go.sum ./ + +# 
Compile Protobuf files +COPY protos/ ./protos/ +RUN mkdir -p go/protos +RUN find ./protos -name "*.proto" \ + -exec protoc --proto_path=protos --go_out=go/protos --go_opt=module=github.com/feast-dev/feast/go/protos --go-grpc_out=go/protos --go-grpc_opt=module=github.com/feast-dev/feast/go/protos {} \; + +# Build the Go application +RUN go build -o feast ./go/main.go + +# Expose ports +EXPOSE 8080 + +# Command to run the executable +# Pass arguments to the executable (Ex: ./feast --type=grpc) +CMD ["./feast"] \ No newline at end of file diff --git a/go/internal/feast/errors.go b/go/internal/feast/errors.go new file mode 100644 index 00000000000..f42b4aad82d --- /dev/null +++ b/go/internal/feast/errors.go @@ -0,0 +1,22 @@ +package feast + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type FeastTransformationServiceNotConfigured struct{} + +func (FeastTransformationServiceNotConfigured) GRPCStatus() *status.Status { + errorStatus := status.New(codes.Internal, "No transformation service configured") + ds, err := errorStatus.WithDetails(&errdetails.LocalizedMessage{Message: "No transformation service configured, required for on-demand feature transformations"}) + if err != nil { + return errorStatus + } + return ds +} + +func (e FeastTransformationServiceNotConfigured) Error() string { + return e.GRPCStatus().Err().Error() +} diff --git a/go/internal/feast/featurestore.go b/go/internal/feast/featurestore.go index ed38411460a..abe1d195def 100644 --- a/go/internal/feast/featurestore.go +++ b/go/internal/feast/featurestore.go @@ -4,7 +4,9 @@ import ( "context" "errors" - "github.com/apache/arrow/go/v8/arrow/memory" + "github.com/apache/arrow/go/v17/arrow/memory" + + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "github.com/feast-dev/feast/go/internal/feast/model" "github.com/feast-dev/feast/go/internal/feast/onlineserving" @@ -20,6 +22,7 @@ type FeatureStore struct { registry 
*registry.Registry onlineStore onlinestore.OnlineStore transformationCallback transformation.TransformationCallback + transformationService *transformation.GrpcTransformationService } // A Features struct specifies a list of features to be retrieved from the online store. These features @@ -45,18 +48,33 @@ func NewFeatureStore(config *registry.RepoConfig, callback transformation.Transf if err != nil { return nil, err } - - registry, err := registry.NewRegistry(config.GetRegistryConfig(), config.RepoPath) + registryConfig, err := config.GetRegistryConfig() if err != nil { return nil, err } - registry.InitializeRegistry() + registry, err := registry.NewRegistry(registryConfig, config.RepoPath, config.Project) + if err != nil { + return nil, err + } + err = registry.InitializeRegistry() + if err != nil { + return nil, err + } + + var transformationService *transformation.GrpcTransformationService + if transformationServerEndpoint, ok := config.FeatureServer["transformation_service_endpoint"]; ok { + // Use a scalable transformation service like Python Transformation Service. + // Assume the user will define the "transformation_service_endpoint" in the feature_store.yaml file + // under the "feature_server" section. 
+ transformationService, _ = transformation.NewGrpcTransformationService(config, transformationServerEndpoint.(string)) + } return &FeatureStore{ config: config, registry: registry, onlineStore: onlineStore, transformationCallback: callback, + transformationService: transformationService, }, nil } @@ -92,6 +110,10 @@ func (fs *FeatureStore) GetOnlineFeatures( return nil, err } + if len(requestedOnDemandFeatureViews) > 0 && fs.transformationService == nil { + return nil, FeastTransformationServiceNotConfigured{} + } + entityNameToJoinKeyMap, expectedJoinKeysSet, err := onlineserving.GetEntityMaps(requestedFeatureViews, entities) if err != nil { return nil, err @@ -113,7 +135,7 @@ func (fs *FeatureStore) GetOnlineFeatures( } result := make([]*onlineserving.FeatureVector, 0) - arrowMemory := memory.NewCgoArrowAllocator() + arrowMemory := memory.NewGoAllocator() featureViews := make([]*model.FeatureView, len(requestedFeatureViews)) index := 0 for _, featuresAndView := range requestedFeatureViews { @@ -161,13 +183,15 @@ func (fs *FeatureStore) GetOnlineFeatures( result = append(result, vectors...) 
} - if fs.transformationCallback != nil { + if fs.transformationCallback != nil || fs.transformationService != nil { onDemandFeatures, err := transformation.AugmentResponseWithOnDemandTransforms( + ctx, requestedOnDemandFeatureViews, requestData, joinKeyToEntityValues, result, fs.transformationCallback, + fs.transformationService, arrowMemory, numRows, fullFeatureNames, @@ -297,6 +321,10 @@ func (fs *FeatureStore) readFromOnlineStore(ctx context.Context, entityRows []*p requestedFeatureViewNames []string, requestedFeatureNames []string, ) ([][]onlinestore.FeatureData, error) { + // Create a Datadog span from context + //span, _ := tracer.StartSpanFromContext(ctx, "fs.readFromOnlineStore") + //defer span.Finish() + numRows := len(entityRows) entityRowsValue := make([]*prototypes.EntityKey, numRows) for index, entityKey := range entityRows { diff --git a/go/internal/feast/featurestore_test.go b/go/internal/feast/featurestore_test.go index dd08bc287e9..e1f908b9062 100644 --- a/go/internal/feast/featurestore_test.go +++ b/go/internal/feast/featurestore_test.go @@ -2,71 +2,241 @@ package feast import ( "context" + "log" + "os" "path/filepath" "runtime" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/feast-dev/feast/go/internal/feast/onlinestore" "github.com/feast-dev/feast/go/internal/feast/registry" + "github.com/feast-dev/feast/go/internal/test" + "github.com/feast-dev/feast/go/protos/feast/serving" "github.com/feast-dev/feast/go/protos/feast/types" ) -// Return absolute path to the test_repo registry regardless of the working directory -func getRegistryPath() map[string]interface{} { +var featureRepoBasePath string +var featureRepoRegistryFile string + +func TestMain(m *testing.M) { // Get the file path of this source file, regardless of the working directory _, filename, _, ok := runtime.Caller(0) if !ok { - panic("couldn't find file path of the test file") + log.Print("couldn't 
find file path of the test file") + os.Exit(1) } - registry := map[string]interface{}{ - "path": filepath.Join(filename, "..", "..", "..", "feature_repo/data/registry.db"), + featureRepoBasePath = filepath.Join(filename, "..", "..", "test") + featureRepoRegistryFile = filepath.Join(featureRepoBasePath, "feature_repo", "data", "registry.db") + if err := test.SetupInitializedRepo(featureRepoBasePath); err != nil { + log.Print("Could not initialize test repo: ", err) + os.Exit(1) } - return registry + os.Exit(m.Run()) } func TestNewFeatureStore(t *testing.T) { - t.Skip("@todo(achals): feature_repo isn't checked in yet") - config := registry.RepoConfig{ - Project: "feature_repo", - Registry: getRegistryPath(), - Provider: "local", - OnlineStore: map[string]interface{}{ - "type": "redis", + tests := []struct { + name string + config *registry.RepoConfig + expectOnlineStoreType interface{} + errMessage string + }{ + { + name: "valid config", + config: ®istry.RepoConfig{ + Project: "feature_repo", + Registry: map[string]interface{}{ + "path": featureRepoRegistryFile, + }, + Provider: "local", + OnlineStore: map[string]interface{}{ + "type": "redis", + }, + }, + expectOnlineStoreType: &onlinestore.RedisOnlineStore{}, + }, + { + name: "valid config with transformation service endpoint", + config: ®istry.RepoConfig{ + Project: "feature_repo", + Registry: map[string]interface{}{ + "path": featureRepoRegistryFile, + }, + Provider: "local", + OnlineStore: map[string]interface{}{ + "type": "redis", + }, + FeatureServer: map[string]interface{}{ + "transformation_service_endpoint": "localhost:50051", + }, + }, + expectOnlineStoreType: &onlinestore.RedisOnlineStore{}, + }, + { + name: "invalid online store config", + config: ®istry.RepoConfig{ + Project: "feature_repo", + Registry: map[string]interface{}{ + "path": featureRepoRegistryFile, + }, + Provider: "local", + OnlineStore: map[string]interface{}{ + "type": "invalid_store", + }, + }, + errMessage: "invalid_store online store 
type is currently not supported", }, } - fs, err := NewFeatureStore(&config, nil) - assert.Nil(t, err) - assert.IsType(t, &onlinestore.RedisOnlineStore{}, fs.onlineStore) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := NewFeatureStore(test.config, nil) + if test.errMessage != "" { + assert.Nil(t, got) + require.Error(t, err) + assert.ErrorContains(t, err, test.errMessage) + + } else { + require.NoError(t, err) + assert.NotNil(t, got) + assert.IsType(t, test.expectOnlineStoreType, got.onlineStore) + } + }) + } + +} + +type MockRedis struct { + mock.Mock +} + +func (m *MockRedis) Destruct() {} +func (m *MockRedis) OnlineRead(ctx context.Context, entityKeys []*types.EntityKey, featureViewNames []string, featureNames []string) ([][]onlinestore.FeatureData, error) { + args := m.Called(ctx, entityKeys, featureViewNames, featureNames) + var fd [][]onlinestore.FeatureData + if args.Get(0) != nil { + fd = args.Get(0).([][]onlinestore.FeatureData) + } + return fd, args.Error(1) +} + +func TestGetOnlineFeatures(t *testing.T) { + tests := []struct { + name string + config *registry.RepoConfig + fn func(*testing.T, *FeatureStore) + }{ + { + name: "redis with simple features", + config: ®istry.RepoConfig{ + Project: "feature_repo", + Registry: map[string]interface{}{ + "path": featureRepoRegistryFile, + }, + Provider: "local", + OnlineStore: map[string]interface{}{ + "type": "redis", + "connection_string": "localhost:6379", + }, + }, + fn: testRedisSimpleFeatures, + }, + { + name: "redis with On-demand feature views, no transformation service endpoint", + config: ®istry.RepoConfig{ + Project: "feature_repo", + Registry: map[string]interface{}{ + "path": featureRepoRegistryFile, + }, + Provider: "local", + OnlineStore: map[string]interface{}{ + "type": "redis", + "connection_string": "localhost:6379", + }, + }, + fn: testRedisODFVNoTransformationService, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + fs, 
err := NewFeatureStore(test.config, nil) + require.Nil(t, err) + fs.onlineStore = new(MockRedis) + test.fn(t, fs) + }) + + } } -func TestGetOnlineFeaturesRedis(t *testing.T) { - t.Skip("@todo(achals): feature_repo isn't checked in yet") - config := registry.RepoConfig{ - Project: "feature_repo", - Registry: getRegistryPath(), - Provider: "local", - OnlineStore: map[string]interface{}{ - "type": "redis", - "connection_string": "localhost:6379", +func testRedisSimpleFeatures(t *testing.T, fs *FeatureStore) { + + featureNames := []string{"driver_hourly_stats:conv_rate", + "driver_hourly_stats:acc_rate", + "driver_hourly_stats:avg_daily_trips", + } + entities := map[string]*types.RepeatedValue{"driver_id": {Val: []*types.Value{{Val: &types.Value_Int64Val{Int64Val: 1001}}, + {Val: &types.Value_Int64Val{Int64Val: 1002}}, + }}} + + results := [][]onlinestore.FeatureData{ + { + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "conv_rate"}, + Value: types.Value{Val: &types.Value_FloatVal{FloatVal: 12.0}}, + }, + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "acc_rate"}, + Value: types.Value{Val: &types.Value_FloatVal{FloatVal: 1.0}}, + }, + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "avg_daily_trips"}, + Value: types.Value{Val: &types.Value_Int64Val{Int64Val: 100}}, + }, + }, + { + + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "conv_rate"}, + Value: types.Value{Val: &types.Value_FloatVal{FloatVal: 24.0}}, + }, + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "acc_rate"}, + Value: types.Value{Val: &types.Value_FloatVal{FloatVal: 2.0}}, + }, + { + Reference: serving.FeatureReferenceV2{FeatureViewName: "driver_hourly_stats", FeatureName: "avg_daily_trips"}, + Value: types.Value{Val: &types.Value_Int64Val{Int64Val: 130}}, + }, }, } + 
ctx := context.Background() + mr := fs.onlineStore.(*MockRedis) + mr.On("OnlineRead", ctx, mock.Anything, mock.Anything, mock.Anything).Return(results, nil) + response, err := fs.GetOnlineFeatures(ctx, featureNames, nil, entities, map[string]*types.RepeatedValue{}, true) + require.Nil(t, err) + assert.Len(t, response, 4) // 3 Features + 1 entity = 4 columns (feature vectors) in response +} +func testRedisODFVNoTransformationService(t *testing.T, fs *FeatureStore) { featureNames := []string{"driver_hourly_stats:conv_rate", "driver_hourly_stats:acc_rate", "driver_hourly_stats:avg_daily_trips", + "transformed_conv_rate:conv_rate_plus_val1", } entities := map[string]*types.RepeatedValue{"driver_id": {Val: []*types.Value{{Val: &types.Value_Int64Val{Int64Val: 1001}}, {Val: &types.Value_Int64Val{Int64Val: 1002}}, {Val: &types.Value_Int64Val{Int64Val: 1003}}}}, } - fs, err := NewFeatureStore(&config, nil) - assert.Nil(t, err) ctx := context.Background() - response, err := fs.GetOnlineFeatures( - ctx, featureNames, nil, entities, map[string]*types.RepeatedValue{}, true) - assert.Nil(t, err) - assert.Len(t, response, 4) // 3 Features + 1 entity = 4 columns (feature vectors) in response + mr := fs.onlineStore.(*MockRedis) + mr.On("OnlineRead", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + response, err := fs.GetOnlineFeatures(ctx, featureNames, nil, entities, map[string]*types.RepeatedValue{}, true) + assert.Nil(t, response) + assert.ErrorAs(t, err, &FeastTransformationServiceNotConfigured{}) + } diff --git a/go/internal/feast/onlineserving/serving.go b/go/internal/feast/onlineserving/serving.go index dc7124fc8b8..2ae733b62bb 100644 --- a/go/internal/feast/onlineserving/serving.go +++ b/go/internal/feast/onlineserving/serving.go @@ -7,9 +7,9 @@ import ( "sort" "strings" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/memory" - "github.com/golang/protobuf/proto" + "github.com/apache/arrow/go/v17/arrow" + 
"github.com/apache/arrow/go/v17/arrow/memory" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" diff --git a/go/internal/feast/onlinestore/onlinestore.go b/go/internal/feast/onlinestore/onlinestore.go index 88cd3dbd9b5..2f30e16d674 100644 --- a/go/internal/feast/onlinestore/onlinestore.go +++ b/go/internal/feast/onlinestore/onlinestore.go @@ -5,11 +5,9 @@ import ( "fmt" "github.com/feast-dev/feast/go/internal/feast/registry" - - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/feast-dev/feast/go/protos/feast/serving" "github.com/feast-dev/feast/go/protos/feast/types" + "github.com/golang/protobuf/ptypes/timestamp" ) type FeatureData struct { diff --git a/go/internal/feast/onlinestore/redisonlinestore.go b/go/internal/feast/onlinestore/redisonlinestore.go index 8fb85085d43..df47deceecf 100644 --- a/go/internal/feast/onlinestore/redisonlinestore.go +++ b/go/internal/feast/onlinestore/redisonlinestore.go @@ -6,18 +6,23 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/feast-dev/feast/go/internal/feast/registry" + //"os" "sort" "strconv" "strings" - "github.com/go-redis/redis/v8" - "github.com/golang/protobuf/proto" + "github.com/feast-dev/feast/go/internal/feast/registry" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + + "github.com/redis/go-redis/v9" "github.com/spaolacci/murmur3" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" "github.com/feast-dev/feast/go/protos/feast/serving" "github.com/feast-dev/feast/go/protos/feast/types" + "github.com/rs/zerolog/log" + //redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/redis/go-redis.v9" ) type redisType int @@ -39,6 +44,9 @@ type RedisOnlineStore struct { // Redis client connector client *redis.Client + // Redis cluster client connector + clusterClient *redis.ClusterClient + config *registry.RepoConfig } @@ -53,11 +61,12 @@ func NewRedisOnlineStore(project 
string, config *registry.RepoConfig, onlineStor var tlsConfig *tls.Config var db int // Default to 0 - // Parse redis_type and write it into conf.t - t, err := getRedisType(onlineStoreConfig) + // Parse redis_type and write it into conf.redisStoreType + redisStoreType, err := getRedisType(onlineStoreConfig) if err != nil { return nil, err } + store.t = redisStoreType // Parse connection_string and write it into conf.address, conf.password, and conf.ssl redisConnJson, ok := onlineStoreConfig["connection_string"] @@ -66,7 +75,7 @@ func NewRedisOnlineStore(project string, config *registry.RepoConfig, onlineStor redisConnJson = "localhost:6379" } if redisConnStr, ok := redisConnJson.(string); !ok { - return nil, errors.New(fmt.Sprintf("failed to convert connection_string to string: %+v", redisConnJson)) + return nil, fmt.Errorf("failed to convert connection_string to string: %+v", redisConnJson) } else { parts := strings.Split(redisConnStr, ",") for _, part := range parts { @@ -89,23 +98,42 @@ func NewRedisOnlineStore(project string, config *registry.RepoConfig, onlineStor return nil, err } } else { - return nil, errors.New(fmt.Sprintf("unrecognized option in connection_string: %s. Must be one of 'password', 'ssl'", kv[0])) + return nil, fmt.Errorf("unrecognized option in connection_string: %s. Must be one of 'password', 'ssl'", kv[0]) } } else { - return nil, errors.New(fmt.Sprintf("unable to parse a part of connection_string: %s. Must contain either ':' (addresses) or '=' (options", part)) + return nil, fmt.Errorf("unable to parse a part of connection_string: %s. 
Must contain either ':' (addresses) or '=' (options", part) } } } - if t == redisNode { + // Metrics are not showing up when the service name is set to DD_SERVICE + //redisTraceServiceName := os.Getenv("DD_SERVICE") + "-redis" + //if redisTraceServiceName == "" { + // redisTraceServiceName = "redis.client" // default service name if DD_SERVICE is not set + //} + + if redisStoreType == redisNode { + log.Info().Msgf("Using Redis: %s", address[0]) store.client = redis.NewClient(&redis.Options{ Addr: address[0], - Password: password, // No password set + Password: password, DB: db, TLSConfig: tlsConfig, }) - } else { - return nil, errors.New("only single node Redis is supported at this time") + //if strings.ToLower(os.Getenv("ENABLE_DATADOG_REDIS_TRACING")) == "true" { + // redistrace.WrapClient(store.client, redistrace.WithServiceName(redisTraceServiceName)) + //} + } else if redisStoreType == redisCluster { + log.Info().Msgf("Using Redis Cluster: %s", address) + store.clusterClient = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: address, + Password: password, + TLSConfig: tlsConfig, + ReadOnly: true, + }) + //if strings.ToLower(os.Getenv("ENABLE_DATADOG_REDIS_TRACING")) == "true" { + // redistrace.WrapClient(store.clusterClient, redistrace.WithServiceName(redisTraceServiceName)) + //} } return &store, nil @@ -119,24 +147,23 @@ func getRedisType(onlineStoreConfig map[string]interface{}) (redisType, error) { // Default to "redis" redisTypeJson = "redis" } else if redisTypeStr, ok := redisTypeJson.(string); !ok { - return -1, errors.New(fmt.Sprintf("failed to convert redis_type to string: %+v", redisTypeJson)) + return -1, fmt.Errorf("failed to convert redis_type to string: %+v", redisTypeJson) } else { if redisTypeStr == "redis" { t = redisNode } else if redisTypeStr == "redis_cluster" { t = redisCluster } else { - return -1, errors.New(fmt.Sprintf("failed to convert redis_type to enum: %s. 
Must be one of 'redis', 'redis_cluster'", redisTypeStr)) + return -1, fmt.Errorf("failed to convert redis_type to enum: %s. Must be one of 'redis', 'redis_cluster'", redisTypeStr) } } return t, nil } -func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.EntityKey, featureViewNames []string, featureNames []string) ([][]FeatureData, error) { - featureCount := len(featureNames) - index := featureCount +func (r *RedisOnlineStore) buildFeatureViewIndices(featureViewNames []string, featureNames []string) (map[string]int, map[int]string, int) { featureViewIndices := make(map[string]int) indicesFeatureView := make(map[int]string) + index := len(featureNames) for _, featureViewName := range featureViewNames { if _, ok := featureViewIndices[featureViewName]; !ok { featureViewIndices[featureViewName] = index @@ -144,6 +171,11 @@ func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.E index += 1 } } + return featureViewIndices, indicesFeatureView, index +} + +func (r *RedisOnlineStore) buildRedisHashSetKeys(featureViewNames []string, featureNames []string, indicesFeatureView map[int]string, index int) ([]string, []string) { + featureCount := len(featureNames) var hsetKeys = make([]string, index) h := murmur3.New32() intBuffer := h.Sum32() @@ -162,36 +194,59 @@ func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.E hsetKeys[i] = tsKey featureNames = append(featureNames, tsKey) } + return hsetKeys, featureNames +} +func (r *RedisOnlineStore) buildRedisKeys(entityKeys []*types.EntityKey) ([]*[]byte, map[string]int, error) { redisKeys := make([]*[]byte, len(entityKeys)) redisKeyToEntityIndex := make(map[string]int) for i := 0; i < len(entityKeys); i++ { - var key, err = buildRedisKey(r.project, entityKeys[i], r.config.EntityKeySerializationVersion) if err != nil { - return nil, err + return nil, nil, err } redisKeys[i] = key redisKeyToEntityIndex[string(*key)] = i } + return redisKeys, 
redisKeyToEntityIndex, nil +} - // Retrieve features from Redis - // TODO: Move context object out - - results := make([][]FeatureData, len(entityKeys)) - pipe := r.client.Pipeline() - commands := map[string]*redis.SliceCmd{} - - for _, redisKey := range redisKeys { - keyString := string(*redisKey) - commands[keyString] = pipe.HMGet(ctx, keyString, hsetKeys...) - } +func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.EntityKey, featureViewNames []string, featureNames []string) ([][]FeatureData, error) { + //span, _ := tracer.StartSpanFromContext(ctx, "redis.OnlineRead") + //defer span.Finish() - _, err := pipe.Exec(ctx) + featureCount := len(featureNames) + featureViewIndices, indicesFeatureView, index := r.buildFeatureViewIndices(featureViewNames, featureNames) + hsetKeys, featureNamesWithTimeStamps := r.buildRedisHashSetKeys(featureViewNames, featureNames, indicesFeatureView, index) + redisKeys, redisKeyToEntityIndex, err := r.buildRedisKeys(entityKeys) if err != nil { return nil, err } + results := make([][]FeatureData, len(entityKeys)) + commands := map[string]*redis.SliceCmd{} + + if r.t == redisNode { + pipe := r.client.Pipeline() + for _, redisKey := range redisKeys { + keyString := string(*redisKey) + commands[keyString] = pipe.HMGet(ctx, keyString, hsetKeys...) + } + _, err = pipe.Exec(ctx) + if err != nil { + return nil, err + } + } else if r.t == redisCluster { + pipe := r.clusterClient.Pipeline() + for _, redisKey := range redisKeys { + keyString := string(*redisKey) + commands[keyString] = pipe.HMGet(ctx, keyString, hsetKeys...) 
+ } + _, err = pipe.Exec(ctx) + if err != nil { + return nil, err + } + } var entityIndex int var resContainsNonNil bool for redisKey, values := range commands { @@ -214,7 +269,7 @@ func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.E if resString == nil { // TODO (Ly): Can there be nil result within each feature or they will all be returned as string proto of types.Value_NullVal proto? - featureName := featureNames[featureIndex] + featureName := featureNamesWithTimeStamps[featureIndex] featureViewName := featureViewNames[featureIndex] timeStampIndex := featureViewIndices[featureViewName] timeStampInterface := res[timeStampIndex] @@ -241,7 +296,7 @@ func (r *RedisOnlineStore) OnlineRead(ctx context.Context, entityKeys []*types.E if err := proto.Unmarshal([]byte(valueString), &value); err != nil { return nil, errors.New("error converting parsed redis Value to types.Value") } else { - featureName := featureNames[featureIndex] + featureName := featureNamesWithTimeStamps[featureIndex] featureViewName := featureViewNames[featureIndex] timeStampIndex := featureViewIndices[featureViewName] timeStampInterface := res[timeStampIndex] @@ -290,7 +345,7 @@ func serializeEntityKey(entityKey *types.EntityKey, entityKeySerializationVersio // Ensure that we have the right amount of join keys and entity values if len(entityKey.JoinKeys) != len(entityKey.EntityValues) { - return nil, errors.New(fmt.Sprintf("the amount of join key names and entity values don't match: %s vs %s", entityKey.JoinKeys, entityKey.EntityValues)) + return nil, fmt.Errorf("the amount of join key names and entity values don't match: %s vs %s", entityKey.JoinKeys, entityKey.EntityValues) } // Make sure that join keys are sorted so that we have consistent key building diff --git a/go/internal/feast/onlinestore/redisonlinestore_test.go b/go/internal/feast/onlinestore/redisonlinestore_test.go index ad9ef1e1e44..34adee191e8 100644 --- 
a/go/internal/feast/onlinestore/redisonlinestore_test.go +++ b/go/internal/feast/onlinestore/redisonlinestore_test.go @@ -1,9 +1,11 @@ package onlinestore import ( - "github.com/feast-dev/feast/go/internal/feast/registry" "testing" + "github.com/feast-dev/feast/go/internal/feast/registry" + "github.com/feast-dev/feast/go/protos/feast/types" + "github.com/stretchr/testify/assert" ) @@ -68,3 +70,125 @@ func TestNewRedisOnlineStoreWithSsl(t *testing.T) { assert.Equal(t, opts.Addr, "redis://localhost:6379") assert.NotNil(t, opts.TLSConfig) } + +func TestBuildFeatureViewIndices(t *testing.T) { + r := &RedisOnlineStore{} + + t.Run("test with empty featureViewNames and featureNames", func(t *testing.T) { + featureViewIndices, indicesFeatureView, index := r.buildFeatureViewIndices([]string{}, []string{}) + assert.Equal(t, 0, len(featureViewIndices)) + assert.Equal(t, 0, len(indicesFeatureView)) + assert.Equal(t, 0, index) + }) + + t.Run("test with non-empty featureNames and empty featureViewNames", func(t *testing.T) { + featureViewIndices, indicesFeatureView, index := r.buildFeatureViewIndices([]string{}, []string{"feature1", "feature2"}) + assert.Equal(t, 0, len(featureViewIndices)) + assert.Equal(t, 0, len(indicesFeatureView)) + assert.Equal(t, 2, index) + }) + + t.Run("test with non-empty featureViewNames and featureNames", func(t *testing.T) { + featureViewIndices, indicesFeatureView, index := r.buildFeatureViewIndices([]string{"view1", "view2"}, []string{"feature1", "feature2"}) + assert.Equal(t, 2, len(featureViewIndices)) + assert.Equal(t, 2, len(indicesFeatureView)) + assert.Equal(t, 4, index) + assert.Equal(t, "view1", indicesFeatureView[2]) + assert.Equal(t, "view2", indicesFeatureView[3]) + }) + + t.Run("test with duplicate featureViewNames", func(t *testing.T) { + featureViewIndices, indicesFeatureView, index := r.buildFeatureViewIndices([]string{"view1", "view1"}, []string{"feature1", "feature2"}) + assert.Equal(t, 1, len(featureViewIndices)) + 
assert.Equal(t, 1, len(indicesFeatureView)) + assert.Equal(t, 3, index) + assert.Equal(t, "view1", indicesFeatureView[2]) + }) +} + +func TestBuildHsetKeys(t *testing.T) { + r := &RedisOnlineStore{} + + t.Run("test with empty featureViewNames and featureNames", func(t *testing.T) { + hsetKeys, featureNames := r.buildRedisHashSetKeys([]string{}, []string{}, map[int]string{}, 0) + assert.Equal(t, 0, len(hsetKeys)) + assert.Equal(t, 0, len(featureNames)) + }) + + t.Run("test with non-empty featureViewNames and featureNames", func(t *testing.T) { + hsetKeys, featureNames := r.buildRedisHashSetKeys([]string{"view1", "view2"}, []string{"feature1", "feature2"}, map[int]string{2: "view1", 3: "view2"}, 4) + assert.Equal(t, 4, len(hsetKeys)) + assert.Equal(t, 4, len(featureNames)) + assert.Equal(t, "_ts:view1", hsetKeys[2]) + assert.Equal(t, "_ts:view2", hsetKeys[3]) + assert.Contains(t, featureNames, "_ts:view1") + assert.Contains(t, featureNames, "_ts:view2") + }) + + t.Run("test with more featureViewNames than featureNames", func(t *testing.T) { + hsetKeys, featureNames := r.buildRedisHashSetKeys([]string{"view1", "view2", "view3"}, []string{"feature1", "feature2", "feature3"}, map[int]string{3: "view1", 4: "view2", 5: "view3"}, 6) + assert.Equal(t, 6, len(hsetKeys)) + assert.Equal(t, 6, len(featureNames)) + assert.Equal(t, "_ts:view1", hsetKeys[3]) + assert.Equal(t, "_ts:view2", hsetKeys[4]) + assert.Equal(t, "_ts:view3", hsetKeys[5]) + assert.Contains(t, featureNames, "_ts:view1") + assert.Contains(t, featureNames, "_ts:view2") + assert.Contains(t, featureNames, "_ts:view3") + }) +} + +func TestBuildRedisKeys(t *testing.T) { + r := &RedisOnlineStore{ + project: "test_project", + config: ®istry.RepoConfig{ + EntityKeySerializationVersion: 2, + }, + } + + entity_key1 := types.EntityKey{ + JoinKeys: []string{"driver_id"}, + EntityValues: []*types.Value{{Val: &types.Value_Int64Val{Int64Val: 1005}}}, + } + + entity_key2 := types.EntityKey{ + JoinKeys: []string{"driver_id"}, 
+ EntityValues: []*types.Value{{Val: &types.Value_Int64Val{Int64Val: 1001}}}, + } + + error_entity_key1 := types.EntityKey{ + JoinKeys: []string{"driver_id", "vehicle_id"}, + EntityValues: []*types.Value{{Val: &types.Value_Int64Val{Int64Val: 1005}}}, + } + + t.Run("test with empty entityKeys", func(t *testing.T) { + redisKeys, redisKeyToEntityIndex, err := r.buildRedisKeys([]*types.EntityKey{}) + assert.Nil(t, err) + assert.Equal(t, 0, len(redisKeys)) + assert.Equal(t, 0, len(redisKeyToEntityIndex)) + }) + + t.Run("test with single entityKey", func(t *testing.T) { + entityKeys := []*types.EntityKey{&entity_key1} + redisKeys, redisKeyToEntityIndex, err := r.buildRedisKeys(entityKeys) + assert.Nil(t, err) + assert.Equal(t, 1, len(redisKeys)) + assert.Equal(t, 1, len(redisKeyToEntityIndex)) + }) + + t.Run("test with multiple entityKeys", func(t *testing.T) { + entityKeys := []*types.EntityKey{ + &entity_key1, &entity_key2, + } + redisKeys, redisKeyToEntityIndex, err := r.buildRedisKeys(entityKeys) + assert.Nil(t, err) + assert.Equal(t, 2, len(redisKeys)) + assert.Equal(t, 2, len(redisKeyToEntityIndex)) + }) + + t.Run("test with error in buildRedisKey", func(t *testing.T) { + entityKeys := []*types.EntityKey{&error_entity_key1} + _, _, err := r.buildRedisKeys(entityKeys) + assert.NotNil(t, err) + }) +} diff --git a/go/internal/feast/onlinestore/sqliteonlinestore_test.go b/go/internal/feast/onlinestore/sqliteonlinestore_test.go index 9a56f4df1a4..929af6d16b4 100644 --- a/go/internal/feast/onlinestore/sqliteonlinestore_test.go +++ b/go/internal/feast/onlinestore/sqliteonlinestore_test.go @@ -21,9 +21,10 @@ func TestSqliteAndFeatureRepoSetup(t *testing.T) { err := test.SetupCleanFeatureRepo(dir) assert.Nil(t, err) config, err := registry.NewRepoConfigFromFile(feature_repo_path) + registryConfig, err := config.GetRegistryConfig() assert.Nil(t, err) assert.Equal(t, "my_project", config.Project) - assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path) + 
assert.Equal(t, "data/registry.db", registryConfig.Path) assert.Equal(t, "local", config.Provider) assert.Equal(t, map[string]interface{}{ "path": "data/online_store.db", diff --git a/go/internal/feast/registry/local.go b/go/internal/feast/registry/local.go index 124fcba3ed9..e5343cd75cd 100644 --- a/go/internal/feast/registry/local.go +++ b/go/internal/feast/registry/local.go @@ -5,8 +5,8 @@ import ( "os" "path/filepath" - "github.com/golang/protobuf/proto" "github.com/google/uuid" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" "github.com/feast-dev/feast/go/protos/feast/core" diff --git a/go/internal/feast/registry/registry.go b/go/internal/feast/registry/registry.go index 9d0684d0230..a383dc42c07 100644 --- a/go/internal/feast/registry/registry.go +++ b/go/internal/feast/registry/registry.go @@ -8,6 +8,7 @@ import ( "time" "github.com/feast-dev/feast/go/internal/feast/model" + "github.com/rs/zerolog/log" "github.com/feast-dev/feast/go/protos/feast/core" ) @@ -26,6 +27,7 @@ var REGISTRY_STORE_CLASS_FOR_SCHEME map[string]string = map[string]string{ */ type Registry struct { + project string registryStore RegistryStore cachedFeatureServices map[string]map[string]*core.FeatureService cachedEntities map[string]map[string]*core.Entity @@ -35,24 +37,25 @@ type Registry struct { cachedRegistry *core.Registry cachedRegistryProtoLastUpdated time.Time cachedRegistryProtoTtl time.Duration - mu sync.Mutex + mu sync.RWMutex } -func NewRegistry(registryConfig *RegistryConfig, repoPath string) (*Registry, error) { +func NewRegistry(registryConfig *RegistryConfig, repoPath string, project string) (*Registry, error) { registryStoreType := registryConfig.RegistryStoreType registryPath := registryConfig.Path r := &Registry{ - cachedRegistryProtoTtl: time.Duration(registryConfig.CacheTtlSeconds), + project: project, + cachedRegistryProtoTtl: time.Duration(registryConfig.CacheTtlSeconds) * time.Second, } if len(registryStoreType) == 0 { - 
registryStore, err := getRegistryStoreFromScheme(registryPath, registryConfig, repoPath) + registryStore, err := getRegistryStoreFromScheme(registryPath, registryConfig, repoPath, project) if err != nil { return nil, err } r.registryStore = registryStore } else { - registryStore, err := getRegistryStoreFromType(registryStoreType, registryConfig, repoPath) + registryStore, err := getRegistryStoreFromType(registryStoreType, registryConfig, repoPath, project) if err != nil { return nil, err } @@ -62,26 +65,30 @@ func NewRegistry(registryConfig *RegistryConfig, repoPath string) (*Registry, er return r, nil } -func (r *Registry) InitializeRegistry() { +func (r *Registry) InitializeRegistry() error { _, err := r.getRegistryProto() if err != nil { + if _, ok := r.registryStore.(*FileRegistryStore); ok { + log.Error().Err(err).Msg("Registry Initialization Failed") + return err + } registryProto := &core.Registry{RegistrySchemaVersion: REGISTRY_SCHEMA_VERSION} r.registryStore.UpdateRegistryProto(registryProto) - go r.refreshRegistryOnInterval() } + go r.RefreshRegistryOnInterval() + return nil } -func (r *Registry) refreshRegistryOnInterval() { +func (r *Registry) RefreshRegistryOnInterval() { ticker := time.NewTicker(r.cachedRegistryProtoTtl) for ; true; <-ticker.C { err := r.refresh() if err != nil { - return + log.Error().Stack().Err(err).Msg("Registry refresh Failed") } } } -// TODO: Add a goroutine and automatically refresh every cachedRegistryProtoTtl func (r *Registry) refresh() error { _, err := r.getRegistryProto() return err @@ -94,7 +101,7 @@ func (r *Registry) getRegistryProto() (*core.Registry, error) { } registryProto, err := r.registryStore.GetRegistryProto() if err != nil { - return registryProto, err + return nil, err } r.load(registryProto) return registryProto, nil @@ -120,50 +127,50 @@ func (r *Registry) load(registry *core.Registry) { func (r *Registry) loadEntities(registry *core.Registry) { entities := registry.Entities for _, entity := range entities 
{ - if _, ok := r.cachedEntities[entity.Spec.Project]; !ok { - r.cachedEntities[entity.Spec.Project] = make(map[string]*core.Entity) + if _, ok := r.cachedEntities[r.project]; !ok { + r.cachedEntities[r.project] = make(map[string]*core.Entity) } - r.cachedEntities[entity.Spec.Project][entity.Spec.Name] = entity + r.cachedEntities[r.project][entity.Spec.Name] = entity } } func (r *Registry) loadFeatureServices(registry *core.Registry) { featureServices := registry.FeatureServices for _, featureService := range featureServices { - if _, ok := r.cachedFeatureServices[featureService.Spec.Project]; !ok { - r.cachedFeatureServices[featureService.Spec.Project] = make(map[string]*core.FeatureService) + if _, ok := r.cachedFeatureServices[r.project]; !ok { + r.cachedFeatureServices[r.project] = make(map[string]*core.FeatureService) } - r.cachedFeatureServices[featureService.Spec.Project][featureService.Spec.Name] = featureService + r.cachedFeatureServices[r.project][featureService.Spec.Name] = featureService } } func (r *Registry) loadFeatureViews(registry *core.Registry) { featureViews := registry.FeatureViews for _, featureView := range featureViews { - if _, ok := r.cachedFeatureViews[featureView.Spec.Project]; !ok { - r.cachedFeatureViews[featureView.Spec.Project] = make(map[string]*core.FeatureView) + if _, ok := r.cachedFeatureViews[r.project]; !ok { + r.cachedFeatureViews[r.project] = make(map[string]*core.FeatureView) } - r.cachedFeatureViews[featureView.Spec.Project][featureView.Spec.Name] = featureView + r.cachedFeatureViews[r.project][featureView.Spec.Name] = featureView } } func (r *Registry) loadStreamFeatureViews(registry *core.Registry) { streamFeatureViews := registry.StreamFeatureViews for _, streamFeatureView := range streamFeatureViews { - if _, ok := r.cachedStreamFeatureViews[streamFeatureView.Spec.Project]; !ok { - r.cachedStreamFeatureViews[streamFeatureView.Spec.Project] = make(map[string]*core.StreamFeatureView) + if _, ok := 
r.cachedStreamFeatureViews[r.project]; !ok { + r.cachedStreamFeatureViews[r.project] = make(map[string]*core.StreamFeatureView) } - r.cachedStreamFeatureViews[streamFeatureView.Spec.Project][streamFeatureView.Spec.Name] = streamFeatureView + r.cachedStreamFeatureViews[r.project][streamFeatureView.Spec.Name] = streamFeatureView } } func (r *Registry) loadOnDemandFeatureViews(registry *core.Registry) { onDemandFeatureViews := registry.OnDemandFeatureViews for _, onDemandFeatureView := range onDemandFeatureViews { - if _, ok := r.cachedOnDemandFeatureViews[onDemandFeatureView.Spec.Project]; !ok { - r.cachedOnDemandFeatureViews[onDemandFeatureView.Spec.Project] = make(map[string]*core.OnDemandFeatureView) + if _, ok := r.cachedOnDemandFeatureViews[r.project]; !ok { + r.cachedOnDemandFeatureViews[r.project] = make(map[string]*core.OnDemandFeatureView) } - r.cachedOnDemandFeatureViews[onDemandFeatureView.Spec.Project][onDemandFeatureView.Spec.Name] = onDemandFeatureView + r.cachedOnDemandFeatureViews[r.project][onDemandFeatureView.Spec.Name] = onDemandFeatureView } } @@ -173,6 +180,8 @@ func (r *Registry) loadOnDemandFeatureViews(registry *core.Registry) { */ func (r *Registry) ListEntities(project string) ([]*model.Entity, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedEntities, ok := r.cachedEntities[project]; !ok { return []*model.Entity{}, nil } else { @@ -192,6 +201,8 @@ func (r *Registry) ListEntities(project string) ([]*model.Entity, error) { */ func (r *Registry) ListFeatureViews(project string) ([]*model.FeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedFeatureViews, ok := r.cachedFeatureViews[project]; !ok { return []*model.FeatureView{}, nil } else { @@ -211,6 +222,8 @@ func (r *Registry) ListFeatureViews(project string) ([]*model.FeatureView, error */ func (r *Registry) ListStreamFeatureViews(project string) ([]*model.FeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedStreamFeatureViews, ok := 
r.cachedStreamFeatureViews[project]; !ok { return []*model.FeatureView{}, nil } else { @@ -230,6 +243,8 @@ func (r *Registry) ListStreamFeatureViews(project string) ([]*model.FeatureView, */ func (r *Registry) ListFeatureServices(project string) ([]*model.FeatureService, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedFeatureServices, ok := r.cachedFeatureServices[project]; !ok { return []*model.FeatureService{}, nil } else { @@ -249,6 +264,8 @@ func (r *Registry) ListFeatureServices(project string) ([]*model.FeatureService, */ func (r *Registry) ListOnDemandFeatureViews(project string) ([]*model.OnDemandFeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedOnDemandFeatureViews, ok := r.cachedOnDemandFeatureViews[project]; !ok { return []*model.OnDemandFeatureView{}, nil } else { @@ -263,6 +280,8 @@ func (r *Registry) ListOnDemandFeatureViews(project string) ([]*model.OnDemandFe } func (r *Registry) GetEntity(project, entityName string) (*model.Entity, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedEntities, ok := r.cachedEntities[project]; !ok { return nil, fmt.Errorf("no cached entities found for project %s", project) } else { @@ -275,6 +294,8 @@ func (r *Registry) GetEntity(project, entityName string) (*model.Entity, error) } func (r *Registry) GetFeatureView(project, featureViewName string) (*model.FeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedFeatureViews, ok := r.cachedFeatureViews[project]; !ok { return nil, fmt.Errorf("no cached feature views found for project %s", project) } else { @@ -287,6 +308,8 @@ func (r *Registry) GetFeatureView(project, featureViewName string) (*model.Featu } func (r *Registry) GetStreamFeatureView(project, streamFeatureViewName string) (*model.FeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedStreamFeatureViews, ok := r.cachedStreamFeatureViews[project]; !ok { return nil, fmt.Errorf("no cached stream feature views found for project %s", project) } else { @@ 
-299,6 +322,8 @@ func (r *Registry) GetStreamFeatureView(project, streamFeatureViewName string) ( } func (r *Registry) GetFeatureService(project, featureServiceName string) (*model.FeatureService, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedFeatureServices, ok := r.cachedFeatureServices[project]; !ok { return nil, fmt.Errorf("no cached feature services found for project %s", project) } else { @@ -311,6 +336,8 @@ func (r *Registry) GetFeatureService(project, featureServiceName string) (*model } func (r *Registry) GetOnDemandFeatureView(project, onDemandFeatureViewName string) (*model.OnDemandFeatureView, error) { + r.mu.RLock() + defer r.mu.RUnlock() if cachedOnDemandFeatureViews, ok := r.cachedOnDemandFeatureViews[project]; !ok { return nil, fmt.Errorf("no cached on demand feature views found for project %s", project) } else { @@ -322,18 +349,18 @@ func (r *Registry) GetOnDemandFeatureView(project, onDemandFeatureViewName strin } } -func getRegistryStoreFromScheme(registryPath string, registryConfig *RegistryConfig, repoPath string) (RegistryStore, error) { +func getRegistryStoreFromScheme(registryPath string, registryConfig *RegistryConfig, repoPath string, project string) (RegistryStore, error) { uri, err := url.Parse(registryPath) if err != nil { return nil, err } if registryStoreType, ok := REGISTRY_STORE_CLASS_FOR_SCHEME[uri.Scheme]; ok { - return getRegistryStoreFromType(registryStoreType, registryConfig, repoPath) + return getRegistryStoreFromType(registryStoreType, registryConfig, repoPath, project) } return nil, fmt.Errorf("registry path %s has unsupported scheme %s. 
Supported schemes are file, s3 and gs", registryPath, uri.Scheme) } -func getRegistryStoreFromType(registryStoreType string, registryConfig *RegistryConfig, repoPath string) (RegistryStore, error) { +func getRegistryStoreFromType(registryStoreType string, registryConfig *RegistryConfig, repoPath string, project string) (RegistryStore, error) { switch registryStoreType { case "FileRegistryStore": return NewFileRegistryStore(registryConfig, repoPath), nil diff --git a/go/internal/feast/registry/repoconfig.go b/go/internal/feast/registry/repoconfig.go index b034b632dc0..f70310f261c 100644 --- a/go/internal/feast/registry/repoconfig.go +++ b/go/internal/feast/registry/repoconfig.go @@ -2,14 +2,18 @@ package registry import ( "encoding/json" - "io/ioutil" + "fmt" + "os" "path/filepath" + "time" + "github.com/feast-dev/feast/go/internal/feast/server/logging" "github.com/ghodss/yaml" ) const ( - defaultCacheTtlSeconds = 600 + defaultCacheTtlSeconds = int64(600) + defaultClientID = "Unknown" ) type RepoConfig struct { @@ -37,6 +41,7 @@ type RepoConfig struct { type RegistryConfig struct { RegistryStoreType string `json:"registry_store_type"` Path string `json:"path"` + ClientId string `json:"client_id" default:"Unknown"` CacheTtlSeconds int64 `json:"cache_ttl_seconds" default:"600"` } @@ -57,7 +62,7 @@ func NewRepoConfigFromJSON(repoPath, configJSON string) (*RepoConfig, error) { // NewRepoConfigFromFile reads the `feature_store.yaml` file in the repo path and converts it // into a RepoConfig struct. 
func NewRepoConfigFromFile(repoPath string) (*RepoConfig, error) { - data, err := ioutil.ReadFile(filepath.Join(repoPath, "feature_store.yaml")) + data, err := os.ReadFile(filepath.Join(repoPath, "feature_store.yaml")) if err != nil { return nil, err } @@ -66,17 +71,47 @@ func NewRepoConfigFromFile(repoPath string) (*RepoConfig, error) { return nil, err } + repoConfigWithEnv := os.ExpandEnv(string(data)) + config := RepoConfig{} - if err = yaml.Unmarshal(data, &config); err != nil { + if err = yaml.Unmarshal([]byte(repoConfigWithEnv), &config); err != nil { return nil, err } config.RepoPath = repoPath return &config, nil } -func (r *RepoConfig) GetRegistryConfig() *RegistryConfig { +func (r *RepoConfig) GetLoggingOptions() (*logging.LoggingOptions, error) { + loggingOptions := logging.LoggingOptions{} + if loggingOptionsMap, ok := r.FeatureServer["feature_logging"].(map[string]interface{}); ok { + loggingOptions = logging.DefaultOptions + for k, v := range loggingOptionsMap { + switch k { + case "queue_capacity": + if value, ok := v.(int); ok { + loggingOptions.ChannelCapacity = value + } + case "emit_timeout_micro_secs": + if value, ok := v.(int); ok { + loggingOptions.EmitTimeout = time.Duration(value) * time.Microsecond + } + case "write_to_disk_interval_secs": + if value, ok := v.(int); ok { + loggingOptions.WriteInterval = time.Duration(value) * time.Second + } + case "flush_interval_secs": + if value, ok := v.(int); ok { + loggingOptions.FlushInterval = time.Duration(value) * time.Second + } + } + } + } + return &loggingOptions, nil +} + +func (r *RepoConfig) GetRegistryConfig() (*RegistryConfig, error) { if registryConfigMap, ok := r.Registry.(map[string]interface{}); ok { - registryConfig := RegistryConfig{CacheTtlSeconds: defaultCacheTtlSeconds} + registryConfig := RegistryConfig{CacheTtlSeconds: defaultCacheTtlSeconds, ClientId: defaultClientID} for k, v := range registryConfigMap { switch k { case "path": @@ -87,14 +122,28 @@ func (r *RepoConfig) 
GetRegistryConfig() *RegistryConfig { if value, ok := v.(string); ok { registryConfig.RegistryStoreType = value } + case "client_id": + if value, ok := v.(string); ok { + registryConfig.ClientId = value + } case "cache_ttl_seconds": - if value, ok := v.(int64); ok { + // cache_ttl_seconds defaulted to type float64. Ex: "cache_ttl_seconds": 60 in registryConfigMap + switch value := v.(type) { + case float64: + registryConfig.CacheTtlSeconds = int64(value) + case int: + registryConfig.CacheTtlSeconds = int64(value) + case int32: + registryConfig.CacheTtlSeconds = int64(value) + case int64: registryConfig.CacheTtlSeconds = value + default: + return nil, fmt.Errorf("unexpected type %T for CacheTtlSeconds", v) } } } - return ®istryConfig + return ®istryConfig, nil } else { - return &RegistryConfig{Path: r.Registry.(string), CacheTtlSeconds: defaultCacheTtlSeconds} + return &RegistryConfig{Path: r.Registry.(string), ClientId: defaultClientID, CacheTtlSeconds: defaultCacheTtlSeconds}, nil } } diff --git a/go/internal/feast/registry/repoconfig_test.go b/go/internal/feast/registry/repoconfig_test.go index 848977886c9..4d30bf7bca0 100644 --- a/go/internal/feast/registry/repoconfig_test.go +++ b/go/internal/feast/registry/repoconfig_test.go @@ -3,8 +3,11 @@ package registry import ( "os" "path/filepath" + "strings" "testing" + "time" + "github.com/feast-dev/feast/go/internal/feast/server/logging" "github.com/stretchr/testify/assert" ) @@ -26,10 +29,11 @@ online_store: err = os.WriteFile(filePath, data, 0666) assert.Nil(t, err) config, err := NewRepoConfigFromFile(dir) + registryConfig, err := config.GetRegistryConfig() assert.Nil(t, err) assert.Equal(t, "feature_repo", config.Project) assert.Equal(t, dir, config.RepoPath) - assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path) + assert.Equal(t, "data/registry.db", registryConfig.Path) assert.Equal(t, "local", config.Provider) assert.Equal(t, map[string]interface{}{ "type": "redis", @@ -40,6 +44,40 @@ 
online_store: assert.Empty(t, config.Flags) } +func TestNewRepoConfigWithEnvironmentVariables(t *testing.T) { + dir, err := os.MkdirTemp("", "feature_repo_*") + assert.Nil(t, err) + defer func() { + assert.Nil(t, os.RemoveAll(dir)) + }() + filePath := filepath.Join(dir, "feature_store.yaml") + data := []byte(` +project: feature_repo +registry: "data/registry.db" +provider: local +online_store: + type: redis + connection_string: ${REDIS_CONNECTION_STRING} +`) + err = os.WriteFile(filePath, data, 0666) + assert.Nil(t, err) + os.Setenv("REDIS_CONNECTION_STRING", "localhost:6380") + config, err := NewRepoConfigFromFile(dir) + registryConfig, err := config.GetRegistryConfig() + assert.Nil(t, err) + assert.Equal(t, "feature_repo", config.Project) + assert.Equal(t, dir, config.RepoPath) + assert.Equal(t, "data/registry.db", registryConfig.Path) + assert.Equal(t, "local", config.Provider) + assert.Equal(t, map[string]interface{}{ + "type": "redis", + "connection_string": "localhost:6380", + }, config.OnlineStore) + assert.Empty(t, config.OfflineStore) + assert.Empty(t, config.FeatureServer) + assert.Empty(t, config.Flags) +} + func TestNewRepoConfigRegistryMap(t *testing.T) { dir, err := os.MkdirTemp("", "feature_repo_*") assert.Nil(t, err) @@ -50,6 +88,7 @@ func TestNewRepoConfigRegistryMap(t *testing.T) { data := []byte(` registry: path: data/registry.db + client_id: "test_client_id" project: feature_repo provider: local online_store: @@ -59,10 +98,12 @@ online_store: err = os.WriteFile(filePath, data, 0666) assert.Nil(t, err) config, err := NewRepoConfigFromFile(dir) + registryConfig, err := config.GetRegistryConfig() assert.Nil(t, err) assert.Equal(t, "feature_repo", config.Project) assert.Equal(t, dir, config.RepoPath) - assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path) + assert.Equal(t, "data/registry.db", registryConfig.Path) + assert.Equal(t, "test_client_id", registryConfig.ClientId) assert.Equal(t, "local", config.Provider) assert.Equal(t, 
map[string]interface{}{ "type": "redis", @@ -83,6 +124,7 @@ func TestNewRepoConfigRegistryConfig(t *testing.T) { data := []byte(` registry: path: data/registry.db + client_id: "test_client_id" project: feature_repo provider: local online_store: @@ -92,7 +134,206 @@ online_store: err = os.WriteFile(filePath, data, 0666) assert.Nil(t, err) config, err := NewRepoConfigFromFile(dir) + registryConfig, err := config.GetRegistryConfig() assert.Nil(t, err) assert.Equal(t, dir, config.RepoPath) - assert.Equal(t, "data/registry.db", config.GetRegistryConfig().Path) + assert.Equal(t, "data/registry.db", registryConfig.Path) + assert.Equal(t, "test_client_id", registryConfig.ClientId) +} +func TestNewRepoConfigFromJSON(t *testing.T) { + // Create a temporary directory for the test + dir, err := os.MkdirTemp("", "feature_repo_*") + assert.Nil(t, err) + defer func() { + assert.Nil(t, os.RemoveAll(dir)) + }() + + // Define a JSON string for the test + registry_path := filepath.Join(dir, "data/registry.db") + + configJSON := `{ + "project": "feature_repo", + "registry": "$REGISTRY_PATH", + "provider": "local", + "online_store": { + "type": "redis", + "connection_string": "localhost:6379" + } + }` + + replacements := map[string]string{ + "$REGISTRY_PATH": registry_path, + } + + // Replace the variables in the JSON string + for variable, replacement := range replacements { + configJSON = strings.ReplaceAll(configJSON, variable, replacement) + } + + // Call the function under test + config, err := NewRepoConfigFromJSON(dir, configJSON) + registryConfig, err := config.GetRegistryConfig() + // Assert that there was no error and that the config was correctly parsed + assert.Nil(t, err) + assert.Equal(t, "feature_repo", config.Project) + assert.Equal(t, filepath.Join(dir, "data/registry.db"), registryConfig.Path) + assert.Equal(t, "local", config.Provider) + assert.Equal(t, map[string]interface{}{ + "type": "redis", + "connection_string": "localhost:6379", + }, config.OnlineStore) + 
assert.Empty(t, config.OfflineStore) + assert.Empty(t, config.FeatureServer) + assert.Empty(t, config.Flags) +} + +func TestGetRegistryConfig_Map(t *testing.T) { + // Create a RepoConfig with a map Registry + config := &RepoConfig{ + Registry: map[string]interface{}{ + "path": "data/registry.db", + "registry_store_type": "local", + "client_id": "test_client_id", + "cache_ttl_seconds": 60, + }, + } + + // Call the method under test + registryConfig, _ := config.GetRegistryConfig() + + // Assert that the method correctly processed the map + assert.Equal(t, "data/registry.db", registryConfig.Path) + assert.Equal(t, "local", registryConfig.RegistryStoreType) + assert.Equal(t, int64(60), registryConfig.CacheTtlSeconds) + assert.Equal(t, "test_client_id", registryConfig.ClientId) +} + +func TestGetRegistryConfig_String(t *testing.T) { + // Create a RepoConfig with a string Registry + config := &RepoConfig{ + Registry: "data/registry.db", + } + + // Call the method under test + registryConfig, _ := config.GetRegistryConfig() + + // Assert that the method correctly processed the string + assert.Equal(t, "data/registry.db", registryConfig.Path) + assert.Equal(t, defaultClientID, registryConfig.ClientId) + println(registryConfig.CacheTtlSeconds) + assert.Empty(t, registryConfig.RegistryStoreType) + assert.Equal(t, defaultCacheTtlSeconds, registryConfig.CacheTtlSeconds) +} + +func TestGetRegistryConfig_CacheTtlSecondsTypes(t *testing.T) { + // Create RepoConfigs with different types for cache_ttl_seconds + configs := []*RepoConfig{ + { + Registry: map[string]interface{}{ + "cache_ttl_seconds": float64(60), + }, + }, + { + Registry: map[string]interface{}{ + "cache_ttl_seconds": int32(60), + }, + }, + { + Registry: map[string]interface{}{ + "cache_ttl_seconds": int64(60), + }, + }, + } + + for _, config := range configs { + // Call the method under test + registryConfig, _ := config.GetRegistryConfig() + + // Assert that the method correctly processed cache_ttl_seconds + 
assert.Equal(t, int64(60), registryConfig.CacheTtlSeconds) + } +} + +func TestGetLoggingOptions_Defaults(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{}, + }, + } + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, logging.DefaultOptions, *options) +} + +func TestGetLoggingOptions_QueueCapacity(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{ + "queue_capacity": 100, + }, + }, + } + expected := logging.DefaultOptions + expected.ChannelCapacity = 100 + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, expected, *options) +} + +func TestGetLoggingOptions_EmitTimeoutMicroSecs(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{ + "emit_timeout_micro_secs": 500, + }, + }, + } + expected := logging.DefaultOptions + expected.EmitTimeout = 500 * time.Microsecond + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, expected, *options) +} + +func TestGetLoggingOptions_WriteToDiskIntervalSecs(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{ + "write_to_disk_interval_secs": 10, + }, + }, + } + expected := logging.DefaultOptions + expected.WriteInterval = 10 * time.Second + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, expected, *options) +} + +func TestGetLoggingOptions_FlushIntervalSecs(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{ + "flush_interval_secs": 15, + }, + }, + } + expected := logging.DefaultOptions + expected.FlushInterval = 15 * time.Second + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, expected, *options) +} + +func 
TestGetLoggingOptions_InvalidType(t *testing.T) { + config := RepoConfig{ + FeatureServer: map[string]interface{}{ + "feature_logging": map[string]interface{}{ + "queue_capacity": "invalid", + }, + }, + } + options, err := config.GetLoggingOptions() + assert.Nil(t, err) + assert.Equal(t, logging.DefaultOptions, *options) } diff --git a/go/internal/feast/server/grpc_server.go b/go/internal/feast/server/grpc_server.go index c47d185d6c1..d5e18b1c9ef 100644 --- a/go/internal/feast/server/grpc_server.go +++ b/go/internal/feast/server/grpc_server.go @@ -3,14 +3,13 @@ package server import ( "context" "fmt" - - "github.com/google/uuid" - "github.com/feast-dev/feast/go/internal/feast" "github.com/feast-dev/feast/go/internal/feast/server/logging" "github.com/feast-dev/feast/go/protos/feast/serving" prototypes "github.com/feast-dev/feast/go/protos/feast/types" "github.com/feast-dev/feast/go/types" + "github.com/google/uuid" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) const feastServerVersion = "0.0.1" @@ -31,15 +30,23 @@ func (s *grpcServingServiceServer) GetFeastServingInfo(ctx context.Context, requ }, nil } -// Returns an object containing the response to GetOnlineFeatures. -// Metadata contains featurenames that corresponds to the number of rows in response.Results. +// GetOnlineFeatures Returns an object containing the response to GetOnlineFeatures. +// Metadata contains feature names that corresponds to the number of rows in response.Results. // Results contains values including the value of the feature, the event timestamp, and feature status in a columnar format. 
func (s *grpcServingServiceServer) GetOnlineFeatures(ctx context.Context, request *serving.GetOnlineFeaturesRequest) (*serving.GetOnlineFeaturesResponse, error) { + //span, ctx := tracer.StartSpanFromContext(ctx, "getOnlineFeatures", tracer.ResourceName("ServingService/GetOnlineFeatures")) + //defer span.Finish() + + //logSpanContext := LogWithSpanContext(span) + requestId := GenerateRequestId() featuresOrService, err := s.fs.ParseFeatures(request.GetKind()) + if err != nil { + //logSpanContext.Error().Err(err).Msg("Error parsing feature service or feature list from request") return nil, err } + featureVectors, err := s.fs.GetOnlineFeatures( ctx, featuresOrService.FeaturesRefs, @@ -47,7 +54,9 @@ func (s *grpcServingServiceServer) GetOnlineFeatures(ctx context.Context, reques request.GetEntities(), request.GetRequestContext(), request.GetFullFeatureNames()) + if err != nil { + //logSpanContext.Error().Err(err).Msg("Error getting online features") return nil, err } @@ -66,6 +75,7 @@ func (s *grpcServingServiceServer) GetOnlineFeatures(ctx context.Context, reques featureNames[idx] = vector.Name values, err := types.ArrowValuesToProtoValues(vector.Values) if err != nil { + //logSpanContext.Error().Err(err).Msg("Error converting Arrow values to proto values") return nil, err } if _, ok := request.Entities[vector.Name]; ok { @@ -83,11 +93,13 @@ func (s *grpcServingServiceServer) GetOnlineFeatures(ctx context.Context, reques if featureService != nil && featureService.LoggingConfig != nil && s.loggingService != nil { logger, err := s.loggingService.GetOrCreateLogger(featureService) if err != nil { + //logSpanContext.Error().Err(err).Msg("Error to instantiating logger for feature service: " + featuresOrService.FeatureService.Name) fmt.Printf("Couldn't instantiate logger for feature service %s: %+v", featuresOrService.FeatureService.Name, err) } err = logger.Log(request.Entities, resp.Results[len(request.Entities):], resp.Metadata.FeatureNames.Val[len(request.Entities):], 
request.RequestContext, requestId) if err != nil { + //logSpanContext.Error().Err(err).Msg("Error to logging to feature service: " + featuresOrService.FeatureService.Name) fmt.Printf("LoggerImpl error[%s]: %+v", featuresOrService.FeatureService.Name, err) } } diff --git a/go/internal/feast/server/grpc_server_test.go b/go/internal/feast/server/grpc_server_test.go index 52960321319..3ef7a6aa8a3 100644 --- a/go/internal/feast/server/grpc_server_test.go +++ b/go/internal/feast/server/grpc_server_test.go @@ -15,10 +15,10 @@ import ( "github.com/feast-dev/feast/go/internal/feast/registry" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/memory" - "github.com/apache/arrow/go/v8/parquet/file" - "github.com/apache/arrow/go/v8/parquet/pqarrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" + "github.com/apache/arrow/go/v17/parquet/file" + "github.com/apache/arrow/go/v17/parquet/pqarrow" "github.com/stretchr/testify/assert" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" diff --git a/go/internal/feast/server/http_server.go b/go/internal/feast/server/http_server.go index 7ebab429e7e..def58aedb88 100644 --- a/go/internal/feast/server/http_server.go +++ b/go/internal/feast/server/http_server.go @@ -5,6 +5,10 @@ import ( "encoding/json" "fmt" "net/http" + //"os" + "runtime" + "strconv" + //"strings" "time" "github.com/feast-dev/feast/go/internal/feast" @@ -14,6 +18,9 @@ import ( "github.com/feast-dev/feast/go/protos/feast/serving" prototypes "github.com/feast-dev/feast/go/protos/feast/types" "github.com/feast-dev/feast/go/types" + "github.com/rs/zerolog/log" + //httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) type httpServer struct { @@ -140,23 +147,45 @@ func NewHttpServer(fs *feast.FeatureStore, loggingService *logging.LoggingServic } func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { 
+ var err error + + ctx := r.Context() + //span, ctx := tracer.StartSpanFromContext(r.Context(), "getOnlineFeatures", tracer.ResourceName("/get-online-features")) + //defer span.Finish(tracer.WithError(err)) + + //logSpanContext := LogWithSpanContext(span) + if r.Method != "POST" { http.NotFound(w, r) return } + statusQuery := r.URL.Query().Get("status") + + status := false + if statusQuery != "" { + status, err = strconv.ParseBool(statusQuery) + if err != nil { + //logSpanContext.Error().Err(err).Msg("Error parsing status query parameter") + writeJSONError(w, fmt.Errorf("Error parsing status query parameter: %+v", err), http.StatusBadRequest) + return + } + } + decoder := json.NewDecoder(r.Body) var request getOnlineFeaturesRequest - err := decoder.Decode(&request) + err = decoder.Decode(&request) if err != nil { - http.Error(w, fmt.Sprintf("Error decoding JSON request data: %+v", err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msg("Error decoding JSON request data") + writeJSONError(w, fmt.Errorf("Error decoding JSON request data: %+v", err), http.StatusInternalServerError) return } var featureService *model.FeatureService if request.FeatureService != nil { featureService, err = s.fs.GetFeatureService(*request.FeatureService) if err != nil { - http.Error(w, fmt.Sprintf("Error getting feature service from registry: %+v", err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msg("Error getting feature service from registry") + writeJSONError(w, fmt.Errorf("Error getting feature service from registry: %+v", err), http.StatusInternalServerError) return } } @@ -170,7 +199,7 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { } featureVectors, err := s.fs.GetOnlineFeatures( - r.Context(), + ctx, request.Features, featureService, entitiesProto, @@ -178,7 +207,8 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { request.FullFeatureNames) if err != nil { - 
http.Error(w, fmt.Sprintf("Error getting feature vector: %+v", err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msg("Error getting feature vector") + writeJSONError(w, fmt.Errorf("Error getting feature vector: %+v", err), http.StatusInternalServerError) return } @@ -187,17 +217,19 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { for _, vector := range featureVectors { featureNames = append(featureNames, vector.Name) result := make(map[string]interface{}) - var statuses []string - for _, status := range vector.Statuses { - statuses = append(statuses, status.String()) - } - var timestamps []string - for _, timestamp := range vector.Timestamps { - timestamps = append(timestamps, timestamp.AsTime().Format(time.RFC3339)) - } + if status { + var statuses []string + for _, status := range vector.Statuses { + statuses = append(statuses, status.String()) + } + var timestamps []string + for _, timestamp := range vector.Timestamps { + timestamps = append(timestamps, timestamp.AsTime().Format(time.RFC3339)) + } - result["statuses"] = statuses - result["event_timestamps"] = timestamps + result["statuses"] = statuses + result["event_timestamps"] = timestamps + } // Note, that vector.Values is an Arrow Array, but this type implements JSON Marshaller. // So, it's not necessary to pre-process it in any way. 
result["values"] = vector.Values @@ -217,14 +249,16 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { err = json.NewEncoder(w).Encode(response) if err != nil { - http.Error(w, fmt.Sprintf("Error encoding response: %+v", err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msg("Error encoding response") + writeJSONError(w, fmt.Errorf("Error encoding response: %+v", err), http.StatusInternalServerError) return } if featureService != nil && featureService.LoggingConfig != nil && s.loggingService != nil { logger, err := s.loggingService.GetOrCreateLogger(featureService) if err != nil { - http.Error(w, fmt.Sprintf("Couldn't instantiate logger for feature service %s: %+v", featureService.Name, err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msgf("Couldn't instantiate logger for feature service %s", featureService.Name) + writeJSONError(w, fmt.Errorf("Couldn't instantiate logger for feature service %s: %+v", featureService.Name, err), http.StatusInternalServerError) return } @@ -236,7 +270,8 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { for _, vector := range featureVectors[len(request.Entities):] { values, err := types.ArrowValuesToProtoValues(vector.Values) if err != nil { - http.Error(w, fmt.Sprintf("Couldn't convert arrow values into protobuf: %+v", err), http.StatusInternalServerError) + //logSpanContext.Error().Err(err).Msg("Couldn't convert arrow values into protobuf") + writeJSONError(w, fmt.Errorf("Couldn't convert arrow values into protobuf: %+v", err), http.StatusInternalServerError) return } featureVectorProtos = append(featureVectorProtos, &serving.GetOnlineFeaturesResponse_FeatureVector{ @@ -248,10 +283,11 @@ func (s *httpServer) getOnlineFeatures(w http.ResponseWriter, r *http.Request) { err = logger.Log(entitiesProto, featureVectorProtos, featureNames[len(request.Entities):], requestContextProto, requestId) if err != nil { - http.Error(w, 
fmt.Sprintf("LoggerImpl error[%s]: %+v", featureService.Name, err), http.StatusInternalServerError) + writeJSONError(w, fmt.Errorf("LoggerImpl error[%s]: %+v", featureService.Name, err), http.StatusInternalServerError) return } } + go releaseCGOMemory(featureVectors) } @@ -261,15 +297,64 @@ func releaseCGOMemory(featureVectors []*onlineserving.FeatureVector) { } } +func logStackTrace() { + // Start with a small buffer and grow it until the full stack trace fits. + buf := make([]byte, 1024) + for { + stackSize := runtime.Stack(buf, false) + if stackSize < len(buf) { + // The stack trace fits in the buffer, so we can log it now. + log.Error().Str("stack_trace", string(buf[:stackSize])).Msg("") + return + } + // The stack trace doesn't fit in the buffer, so we need to grow the buffer and try again. + buf = make([]byte, 2*len(buf)) + } +} + +func writeJSONError(w http.ResponseWriter, err error, statusCode int) { + errMap := map[string]interface{}{ + "error": fmt.Sprintf("%+v", err), + "status_code": statusCode, + } + errJSON, _ := json.Marshal(errMap) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + w.Write(errJSON) +} + +func recoverMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if r := recover(); r != nil { + log.Error().Err(fmt.Errorf("Panic recovered: %v", r)).Msg("A panic occurred in the server") + // Log the stack trace + logStackTrace() + + writeJSONError(w, fmt.Errorf("Internal Server Error: %v", r), http.StatusInternalServerError) + } + }() + next.ServeHTTP(w, r) + }) +} + func (s *httpServer) Serve(host string, port int) error { - s.server = &http.Server{Addr: fmt.Sprintf("%s:%d", host, port), Handler: nil} - http.HandleFunc("/get-online-features", s.getOnlineFeatures) - http.HandleFunc("/health", healthCheckHandler) + // DD + //if strings.ToLower(os.Getenv("ENABLE_DATADOG_TRACING")) == "true" { + // 
tracer.Start(tracer.WithRuntimeMetrics()) + // defer tracer.Stop() + //} + mux := http.NewServeMux() + mux.Handle("/get-online-features", recoverMiddleware(http.HandlerFunc(s.getOnlineFeatures))) + mux.HandleFunc("/health", healthCheckHandler) + s.server = &http.Server{Addr: fmt.Sprintf("%s:%d", host, port), Handler: mux, ReadTimeout: 5 * time.Second, WriteTimeout: 10 * time.Second, IdleTimeout: 15 * time.Second} err := s.server.ListenAndServe() // Don't return the error if it's caused by graceful shutdown using Stop() if err == http.ErrServerClosed { return nil } + log.Fatal().Stack().Err(err).Msg("Failed to start HTTP server") return err } @@ -277,7 +362,6 @@ func healthCheckHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "Healthy") } - func (s *httpServer) Stop() error { if s.server != nil { return s.server.Shutdown(context.Background()) diff --git a/go/internal/feast/server/http_server_test.go b/go/internal/feast/server/http_server_test.go index 67ba1c60f96..e0d474a9f34 100644 --- a/go/internal/feast/server/http_server_test.go +++ b/go/internal/feast/server/http_server_test.go @@ -1,8 +1,13 @@ package server import ( - "github.com/stretchr/testify/assert" + "encoding/json" "testing" + + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" + "github.com/stretchr/testify/assert" ) func TestUnmarshalJSON(t *testing.T) { @@ -38,3 +43,36 @@ func TestUnmarshalJSON(t *testing.T) { assert.Nil(t, u.UnmarshalJSON([]byte("[[true, false, true], [false, true, false]]"))) assert.Equal(t, [][]bool{{true, false, true}, {false, true, false}}, u.boolListVal) } +func TestMarshalInt32JSON(t *testing.T) { + var arrowArray arrow.Array + memoryPool := memory.NewGoAllocator() + builder := array.NewInt32Builder(memoryPool) + defer builder.Release() + builder.AppendValues([]int32{1, 2, 3, 4}, nil) + arrowArray = builder.NewArray() + defer arrowArray.Release() + 
expectedJSON := `[1,2,3,4]` + + jsonData, err := json.Marshal(arrowArray) + assert.NoError(t, err, "Error marshaling Arrow array") + + assert.Equal(t, expectedJSON, string(jsonData), "JSON output does not match expected") + assert.IsType(t, &array.Int32{}, arrowArray, "arrowArray is not of type *array.Int32") +} + +func TestMarshalInt64JSON(t *testing.T) { + var arrowArray arrow.Array + memoryPool := memory.NewGoAllocator() + builder := array.NewInt64Builder(memoryPool) + defer builder.Release() + builder.AppendValues([]int64{-9223372036854775808, 9223372036854775807}, nil) + arrowArray = builder.NewArray() + defer arrowArray.Release() + expectedJSON := `[-9223372036854775808,9223372036854775807]` + + jsonData, err := json.Marshal(arrowArray) + assert.NoError(t, err, "Error marshaling Arrow array") + + assert.Equal(t, expectedJSON, string(jsonData), "JSON output does not match expected") + assert.IsType(t, &array.Int64{}, arrowArray, "arrowArray is not of type *array.Int64") +} diff --git a/go/internal/feast/server/logging/filelogsink.go b/go/internal/feast/server/logging/filelogsink.go index d9796d69d10..ae33e61a658 100644 --- a/go/internal/feast/server/logging/filelogsink.go +++ b/go/internal/feast/server/logging/filelogsink.go @@ -8,12 +8,12 @@ import ( "github.com/pkg/errors" - "github.com/apache/arrow/go/v8/arrow" + "github.com/apache/arrow/go/v17/arrow" "github.com/google/uuid" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/parquet" - "github.com/apache/arrow/go/v8/parquet/pqarrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/parquet" + "github.com/apache/arrow/go/v17/parquet/pqarrow" ) type FileLogSink struct { diff --git a/go/internal/feast/server/logging/logger.go b/go/internal/feast/server/logging/logger.go index 0e4f230f5ad..edea8aa1abb 100644 --- a/go/internal/feast/server/logging/logger.go +++ b/go/internal/feast/server/logging/logger.go @@ -8,7 +8,7 @@ import ( "sync" "time" - 
"github.com/apache/arrow/go/v8/arrow" + "github.com/apache/arrow/go/v17/arrow" "github.com/pkg/errors" "google.golang.org/protobuf/types/known/timestamppb" diff --git a/go/internal/feast/server/logging/logger_test.go b/go/internal/feast/server/logging/logger_test.go index ddc1902b7d1..b81179f2d29 100644 --- a/go/internal/feast/server/logging/logger_test.go +++ b/go/internal/feast/server/logging/logger_test.go @@ -7,11 +7,11 @@ import ( "testing" "time" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/memory" - "github.com/apache/arrow/go/v8/parquet/file" - "github.com/apache/arrow/go/v8/parquet/pqarrow" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" + "github.com/apache/arrow/go/v17/parquet/file" + "github.com/apache/arrow/go/v17/parquet/pqarrow" "github.com/stretchr/testify/require" "github.com/feast-dev/feast/go/protos/feast/types" diff --git a/go/internal/feast/server/logging/memorybuffer.go b/go/internal/feast/server/logging/memorybuffer.go index c9f00218dfc..cd97327a4aa 100644 --- a/go/internal/feast/server/logging/memorybuffer.go +++ b/go/internal/feast/server/logging/memorybuffer.go @@ -2,9 +2,10 @@ package logging import ( "fmt" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/memory" + + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" "github.com/feast-dev/feast/go/protos/feast/types" gotypes "github.com/feast-dev/feast/go/types" @@ -128,7 +129,7 @@ func getArrowSchema(schema *FeatureServiceSchema) (*arrow.Schema, error) { // and writes them to arrow table. // Returns arrow table that contains all of the logs in columnar format. 
func (b *MemoryBuffer) convertToArrowRecord() (arrow.Record, error) { - arrowMemory := memory.NewCgoArrowAllocator() + arrowMemory := memory.NewGoAllocator() numRows := len(b.logs) columns := make(map[string][]*types.Value) diff --git a/go/internal/feast/server/logging/memorybuffer_test.go b/go/internal/feast/server/logging/memorybuffer_test.go index ec83680f4ff..6c6db8fc880 100644 --- a/go/internal/feast/server/logging/memorybuffer_test.go +++ b/go/internal/feast/server/logging/memorybuffer_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/memory" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/types/known/timestamppb" @@ -118,7 +118,7 @@ func TestSerializeToArrowTable(t *testing.T) { LogTimestamp: time.Now(), }) - pool := memory.NewCgoArrowAllocator() + pool := memory.NewGoAllocator() builder := array.NewRecordBuilder(pool, b.arrowSchema) defer builder.Release() diff --git a/go/internal/feast/server/logging/offlinestoresink.go b/go/internal/feast/server/logging/offlinestoresink.go index 632039baa43..b0f247ce6e1 100644 --- a/go/internal/feast/server/logging/offlinestoresink.go +++ b/go/internal/feast/server/logging/offlinestoresink.go @@ -8,10 +8,10 @@ import ( "os" "path/filepath" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/parquet" - "github.com/apache/arrow/go/v8/parquet/pqarrow" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/parquet" + "github.com/apache/arrow/go/v17/parquet/pqarrow" "github.com/google/uuid" ) diff --git a/go/internal/feast/server/server_commons.go b/go/internal/feast/server/server_commons.go new file mode 100644 index 
00000000000..140269d5c1c --- /dev/null +++ b/go/internal/feast/server/server_commons.go @@ -0,0 +1,31 @@ +package server + +import ( + "github.com/rs/zerolog" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "os" +) + +func LogWiwithSpanContext() zerolog.Logger { + var logger = zerolog.New(os.Stderr).With(). + Timestamp(). + Logger() + + return logger +} + +/* +func LogWithSpanContext(span tracer.Span) zerolog.Logger { + spanContext := span.Context() + + var logger = zerolog.New(os.Stderr).With(). + Timestamp(). + Logger() + //Int64("trace_id", int64(spanContext.TraceID())). + //Int64("span_id", int64(spanContext.SpanID())). + //Timestamp(). + //Logger() + + return logger +} +*/ diff --git a/go/internal/feast/transformation/transformation.go b/go/internal/feast/transformation/transformation.go index 7e63aec2243..d6df03039d7 100644 --- a/go/internal/feast/transformation/transformation.go +++ b/go/internal/feast/transformation/transformation.go @@ -1,20 +1,18 @@ package transformation import ( - "errors" + "context" "fmt" + "runtime" "strings" - "unsafe" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/cdata" - "github.com/apache/arrow/go/v8/arrow/memory" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/memory" + "github.com/rs/zerolog/log" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "github.com/feast-dev/feast/go/internal/feast/model" "github.com/feast-dev/feast/go/internal/feast/onlineserving" - "github.com/feast-dev/feast/go/protos/feast/serving" prototypes "github.com/feast-dev/feast/go/protos/feast/types" "github.com/feast-dev/feast/go/types" ) @@ -24,20 +22,27 @@ TransformationCallback is a Python callback function's expected signature. The function should accept name of the on demand feature view and pointers to input & output record batches. 
Each record batch is being passed as two pointers: pointer to array (data) and pointer to schema. Python function is expected to return number of rows added to the output record batch. + +[11-20-2024] Use a Transformation GRPC service, like the Python version one, for better scalability. */ type TransformationCallback func(ODFVName string, inputArrPtr, inputSchemaPtr, outArrPtr, outSchemaPtr uintptr, fullFeatureNames bool) int func AugmentResponseWithOnDemandTransforms( + ctx context.Context, onDemandFeatureViews []*model.OnDemandFeatureView, requestData map[string]*prototypes.RepeatedValue, entityRows map[string]*prototypes.RepeatedValue, features []*onlineserving.FeatureVector, transformationCallback TransformationCallback, + transformationService *GrpcTransformationService, arrowMemory memory.Allocator, numRows int, fullFeatureNames bool, ) ([]*onlineserving.FeatureVector, error) { + //span, _ := tracer.StartSpanFromContext(ctx, "transformation.AugmentResponseWithOnDemandTransforms") + //defer span.Finish() + result := make([]*onlineserving.FeatureVector, 0) var err error @@ -64,17 +69,20 @@ func AugmentResponseWithOnDemandTransforms( retrievedFeatures[vector.Name] = vector.Values } - onDemandFeatures, err := CallTransformations( - odfv, - retrievedFeatures, - requestContextArrow, - transformationCallback, - numRows, - fullFeatureNames, - ) - if err != nil { - ReleaseArrowContext(requestContextArrow) - return nil, err + var onDemandFeatures []*onlineserving.FeatureVector + if transformationService != nil { + onDemandFeatures, err = transformationService.GetTransformation( + ctx, + odfv, + retrievedFeatures, + requestContextArrow, + numRows, + fullFeatureNames, + ) + if err != nil { + ReleaseArrowContext(requestContextArrow) + return nil, err + } } result = append(result, onDemandFeatures...) 
@@ -91,103 +99,6 @@ func ReleaseArrowContext(requestContextArrow map[string]arrow.Array) { } } -func CallTransformations( - featureView *model.OnDemandFeatureView, - retrievedFeatures map[string]arrow.Array, - requestContext map[string]arrow.Array, - callback TransformationCallback, - numRows int, - fullFeatureNames bool, -) ([]*onlineserving.FeatureVector, error) { - - inputArr := cdata.CArrowArray{} - inputSchema := cdata.CArrowSchema{} - - outArr := cdata.CArrowArray{} - outSchema := cdata.CArrowSchema{} - - defer cdata.ReleaseCArrowArray(&inputArr) - defer cdata.ReleaseCArrowArray(&outArr) - defer cdata.ReleaseCArrowSchema(&inputSchema) - defer cdata.ReleaseCArrowSchema(&outSchema) - - inputArrPtr := uintptr(unsafe.Pointer(&inputArr)) - inputSchemaPtr := uintptr(unsafe.Pointer(&inputSchema)) - - outArrPtr := uintptr(unsafe.Pointer(&outArr)) - outSchemaPtr := uintptr(unsafe.Pointer(&outSchema)) - - inputFields := make([]arrow.Field, 0) - inputColumns := make([]arrow.Array, 0) - for name, arr := range retrievedFeatures { - inputFields = append(inputFields, arrow.Field{Name: name, Type: arr.DataType()}) - inputColumns = append(inputColumns, arr) - } - for name, arr := range requestContext { - inputFields = append(inputFields, arrow.Field{Name: name, Type: arr.DataType()}) - inputColumns = append(inputColumns, arr) - } - - inputRecord := array.NewRecord(arrow.NewSchema(inputFields, nil), inputColumns, int64(numRows)) - defer inputRecord.Release() - - cdata.ExportArrowRecordBatch(inputRecord, &inputArr, &inputSchema) - - ret := callback(featureView.Base.Name, inputArrPtr, inputSchemaPtr, outArrPtr, outSchemaPtr, fullFeatureNames) - - if ret != numRows { - return nil, errors.New("python transformation callback failed") - } - - outRecord, err := cdata.ImportCRecordBatch(&outArr, &outSchema) - if err != nil { - return nil, err - } - - result := make([]*onlineserving.FeatureVector, 0) - for idx, field := range outRecord.Schema().Fields() { - dropFeature := true - - if 
featureView.Base.Projection != nil { - var featureName string - if fullFeatureNames { - featureName = strings.Split(field.Name, "__")[1] - } else { - featureName = field.Name - } - - for _, feature := range featureView.Base.Projection.Features { - if featureName == feature.Name { - dropFeature = false - } - } - } else { - dropFeature = false - } - - if dropFeature { - continue - } - - statuses := make([]serving.FieldStatus, numRows) - timestamps := make([]*timestamppb.Timestamp, numRows) - - for idx := 0; idx < numRows; idx++ { - statuses[idx] = serving.FieldStatus_PRESENT - timestamps[idx] = timestamppb.Now() - } - - result = append(result, &onlineserving.FeatureVector{ - Name: field.Name, - Values: outRecord.Column(idx), - Statuses: statuses, - Timestamps: timestamps, - }) - } - - return result, nil -} - func EnsureRequestedDataExist(requestedOnDemandFeatureViews []*model.OnDemandFeatureView, requestDataFeatures map[string]*prototypes.RepeatedValue) error { @@ -220,3 +131,15 @@ func getNeededRequestData(requestedOnDemandFeatureViews []*model.OnDemandFeature return neededRequestData, nil } + +func logStackTrace() { + // Create a buffer for storing the stack trace + const size = 4096 + buf := make([]byte, size) + + // Retrieve the stack trace and write it to the buffer + stackSize := runtime.Stack(buf, false) + + // Log the stack trace using zerolog + log.Error().Str("stack_trace", string(buf[:stackSize])).Msg("") +} diff --git a/go/internal/feast/transformation/transformation_service.go b/go/internal/feast/transformation/transformation_service.go new file mode 100644 index 00000000000..0595d463b37 --- /dev/null +++ b/go/internal/feast/transformation/transformation_service.go @@ -0,0 +1,205 @@ +package transformation + +import ( + "bytes" + "context" + "fmt" + "strings" + + "io" + + "github.com/feast-dev/feast/go/internal/feast/registry" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/apache/arrow/go/v17/arrow" + 
"github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/ipc" + "github.com/apache/arrow/go/v17/arrow/memory" + "github.com/feast-dev/feast/go/internal/feast/model" + "github.com/feast-dev/feast/go/internal/feast/onlineserving" + "github.com/feast-dev/feast/go/protos/feast/serving" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type GrpcTransformationService struct { + project string + conn *grpc.ClientConn + client *serving.TransformationServiceClient +} + +func NewGrpcTransformationService(config *registry.RepoConfig, endpoint string) (*GrpcTransformationService, error) { + opts := make([]grpc.DialOption, 0) + opts = append(opts, grpc.WithDefaultCallOptions(), grpc.WithTransportCredentials(insecure.NewCredentials())) + + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return nil, err + } + client := serving.NewTransformationServiceClient(conn) + return &GrpcTransformationService{config.Project, conn, &client}, nil +} + +func (s *GrpcTransformationService) Close() error { + return s.conn.Close() +} + +func (s *GrpcTransformationService) GetTransformation( + ctx context.Context, + featureView *model.OnDemandFeatureView, + retrievedFeatures map[string]arrow.Array, + requestContext map[string]arrow.Array, + numRows int, + fullFeatureNames bool, +) ([]*onlineserving.FeatureVector, error) { + var err error + + inputFields := make([]arrow.Field, 0) + inputColumns := make([]arrow.Array, 0) + for name, arr := range retrievedFeatures { + inputFields = append(inputFields, arrow.Field{Name: name, Type: arr.DataType()}) + inputColumns = append(inputColumns, arr) + } + for name, arr := range requestContext { + inputFields = append(inputFields, arrow.Field{Name: name, Type: arr.DataType()}) + inputColumns = append(inputColumns, arr) + } + + inputSchema := arrow.NewSchema(inputFields, nil) + inputRecord := array.NewRecord(inputSchema, inputColumns, int64(numRows)) + defer inputRecord.Release() + + 
recordValueWriter := new(ByteSliceWriter) + arrowWriter, err := ipc.NewFileWriter(recordValueWriter, ipc.WithSchema(inputSchema)) + if err != nil { + return nil, err + } + + err = arrowWriter.Write(inputRecord) + if err != nil { + return nil, err + } + + err = arrowWriter.Close() + if err != nil { + return nil, err + } + + arrowInput := serving.ValueType_ArrowValue{ArrowValue: recordValueWriter.buf} + transformationInput := serving.ValueType{Value: &arrowInput} + + req := serving.TransformFeaturesRequest{ + OnDemandFeatureViewName: featureView.Base.Name, + Project: s.project, + TransformationInput: &transformationInput, + } + + res, err := (*s.client).TransformFeatures(ctx, &req) + if err != nil { + return nil, err + } + + arrowBytes := res.TransformationOutput.GetArrowValue() + return ExtractTransformationResponse(featureView, arrowBytes, numRows, false) +} + +func ExtractTransformationResponse( + featureView *model.OnDemandFeatureView, + arrowBytes []byte, + numRows int, + fullFeatureNames bool, +) ([]*onlineserving.FeatureVector, error) { + arrowMemory := memory.NewGoAllocator() + arrowReader, err := ipc.NewFileReader(bytes.NewReader(arrowBytes), ipc.WithAllocator(arrowMemory)) + if err != nil { + return nil, err + } + + outRecord, err := arrowReader.Read() + if err != nil { + return nil, err + } + result := make([]*onlineserving.FeatureVector, 0) + for idx, field := range outRecord.Schema().Fields() { + dropFeature := true + + featureName := strings.Split(field.Name, "__")[1] + if featureView.Base.Projection != nil { + + for _, feature := range featureView.Base.Projection.Features { + if featureName == feature.Name { + dropFeature = false + } + } + } else { + dropFeature = false + } + + if dropFeature { + continue + } + + statuses := make([]serving.FieldStatus, numRows) + timestamps := make([]*timestamppb.Timestamp, numRows) + + for idx := 0; idx < numRows; idx++ { + statuses[idx] = serving.FieldStatus_PRESENT + timestamps[idx] = timestamppb.Now() + } + + 
result = append(result, &onlineserving.FeatureVector{ + Name: featureName, + Values: outRecord.Column(idx), + Statuses: statuses, + Timestamps: timestamps, + }) + } + + return result, nil +} + +type ByteSliceWriter struct { + buf []byte + offset int64 +} + +func (w *ByteSliceWriter) Write(p []byte) (n int, err error) { + minCap := int(w.offset) + len(p) + if minCap > cap(w.buf) { // Make sure buf has enough capacity: + buf2 := make([]byte, len(w.buf), minCap+len(p)) // add some extra + copy(buf2, w.buf) + w.buf = buf2 + } + if minCap > len(w.buf) { + w.buf = w.buf[:minCap] + } + copy(w.buf[w.offset:], p) + w.offset += int64(len(p)) + return len(p), nil +} + +func (w *ByteSliceWriter) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + if w.offset != offset && (offset < 0 || offset > int64(len(w.buf))) { + return 0, fmt.Errorf("invalid seek: new offset %d out of range [0 %d]", offset, len(w.buf)) + } + w.offset = offset + return offset, nil + case io.SeekCurrent: + newOffset := w.offset + offset + if newOffset != offset && (newOffset < 0 || newOffset > int64(len(w.buf))) { + return 0, fmt.Errorf("invalid seek: new offset %d out of range [0 %d]", offset, len(w.buf)) + } + w.offset += offset + return w.offset, nil + case io.SeekEnd: + newOffset := int64(len(w.buf)) + offset + if newOffset != offset && (newOffset < 0 || newOffset > int64(len(w.buf))) { + return 0, fmt.Errorf("invalid seek: new offset %d out of range [0 %d]", offset, len(w.buf)) + } + w.offset = offset + return w.offset, nil + } + return 0, fmt.Errorf("unsupported seek mode %d", whence) +} diff --git a/go/internal/test/feature_repo/example.py b/go/internal/test/feature_repo/example.py index 70843610075..a814b58913b 100644 --- a/go/internal/test/feature_repo/example.py +++ b/go/internal/test/feature_repo/example.py @@ -2,10 +2,12 @@ from datetime import timedelta -from feast import Entity, Feature, FeatureView, Field, FileSource, FeatureService +from feast import 
# Request-time data source: values that are only known when the user-initiated
# HTTP request arrives (they are not precomputed in the offline store).
input_request = RequestSource(
    name="vals_to_add",
    schema=[
        Field(name="val_to_add", dtype=PrimitiveFeastType.INT64),
        Field(name="val_to_add_2", dtype=PrimitiveFeastType.INT64),
    ],
)


# Combine precomputed view features with request-time values into new
# on-demand features computed at serving time.
@on_demand_feature_view(
    sources=[driver_hourly_stats_view, input_request],
    schema=[
        Field(name="conv_rate_plus_val1", dtype=Float64),
        Field(name="conv_rate_plus_val2", dtype=Float64),
    ],
)
def transformed_conv_rate(features_df: pd.DataFrame) -> pd.DataFrame:
    """Add the request-supplied values to the stored conversion rate.

    features_df carries one row per entity with both the view features
    (conv_rate) and the request-source columns (val_to_add, val_to_add_2).
    """
    out = pd.DataFrame()
    out["conv_rate_plus_val1"] = features_df["conv_rate"] + features_df["val_to_add"]
    out["conv_rate_plus_val2"] = features_df["conv_rate"] + features_df["val_to_add_2"]
    return out
3ec9aa2a4cd..5068f405063 100644 --- a/go/internal/test/go_integration_test_utils.go +++ b/go/internal/test/go_integration_test_utils.go @@ -5,20 +5,20 @@ import ( "fmt" "log" - "github.com/apache/arrow/go/v8/arrow/memory" + "github.com/apache/arrow/go/v17/arrow/memory" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/parquet/file" - "github.com/apache/arrow/go/v8/parquet/pqarrow" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/parquet/file" + "github.com/apache/arrow/go/v17/parquet/pqarrow" "os" "os/exec" "path/filepath" "time" - "github.com/apache/arrow/go/v8/arrow/array" + "github.com/apache/arrow/go/v17/arrow/array" "github.com/feast-dev/feast/go/internal/feast/model" "github.com/feast-dev/feast/go/protos/feast/types" @@ -107,7 +107,10 @@ func SetupCleanFeatureRepo(basePath string) error { return err } applyCommand.Dir = featureRepoPath - applyCommand.Run() + err = applyCommand.Run() + if err != nil { + return err + } t := time.Now() formattedTime := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d", @@ -120,7 +123,6 @@ func SetupCleanFeatureRepo(basePath string) error { if err != nil { return err } - return nil } diff --git a/go/main.go b/go/main.go new file mode 100644 index 00000000000..feb54faa2e0 --- /dev/null +++ b/go/main.go @@ -0,0 +1,180 @@ +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + //"strings" + "syscall" + + "github.com/feast-dev/feast/go/internal/feast" + "github.com/feast-dev/feast/go/internal/feast/registry" + "github.com/feast-dev/feast/go/internal/feast/server" + "github.com/feast-dev/feast/go/internal/feast/server/logging" + "github.com/feast-dev/feast/go/protos/feast/serving" + "github.com/rs/zerolog/log" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + //grpctrace 
"gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc" + //"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +) + +type ServerStarter interface { + StartHttpServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error + StartGrpcServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error +} + +type RealServerStarter struct{} + +func (s *RealServerStarter) StartHttpServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + return StartHttpServer(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) +} + +func (s *RealServerStarter) StartGrpcServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + return StartGrpcServer(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) +} + +func main() { + // Default values + serverType := "http" + host := "" + port := 8080 + server := RealServerStarter{} + // Current Directory + repoPath, err := os.Getwd() + if err != nil { + log.Error().Stack().Err(err).Msg("Failed to get current directory") + } + + flag.StringVar(&serverType, "type", serverType, "Specify the server type (http or grpc)") + flag.StringVar(&repoPath, "chdir", repoPath, "Repository path where feature store yaml file is stored") + + flag.StringVar(&host, "host", host, "Specify a host for the server") + flag.IntVar(&port, "port", port, "Specify a port for the server") + flag.Parse() + + repoConfig, err := registry.NewRepoConfigFromFile(repoPath) + if err != nil { + log.Fatal().Stack().Err(err).Msg("Failed to convert to RepoConfig") + } + + fs, err := feast.NewFeatureStore(repoConfig, nil) + if err != nil { + 
log.Fatal().Stack().Err(err).Msg("Failed to create NewFeatureStore") + } + + loggingOptions, err := repoConfig.GetLoggingOptions() + if err != nil { + log.Fatal().Stack().Err(err).Msg("Failed to get LoggingOptions") + } + + // TODO: writeLoggedFeaturesCallback is defaulted to nil. write_logged_features functionality needs to be + // implemented in Golang specific to OfflineStoreSink. Python Feature Server doesn't support this. + if serverType == "http" { + err = server.StartHttpServer(fs, host, port, nil, loggingOptions) + } else if serverType == "grpc" { + err = server.StartGrpcServer(fs, host, port, nil, loggingOptions) + } else { + fmt.Println("Unknown server type. Please specify 'http' or 'grpc'.") + } + + if err != nil { + log.Fatal().Stack().Err(err).Msg("Failed to start server") + } + +} + +func constructLoggingService(fs *feast.FeatureStore, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) (*logging.LoggingService, error) { + var loggingService *logging.LoggingService = nil + if writeLoggedFeaturesCallback != nil { + sink, err := logging.NewOfflineStoreSink(writeLoggedFeaturesCallback) + if err != nil { + return nil, err + } + + loggingService, err = logging.NewLoggingService(fs, sink, logging.LoggingOptions{ + ChannelCapacity: loggingOpts.ChannelCapacity, + EmitTimeout: loggingOpts.EmitTimeout, + WriteInterval: loggingOpts.WriteInterval, + FlushInterval: loggingOpts.FlushInterval, + }) + if err != nil { + return nil, err + } + } + return loggingService, nil +} + +// StartGprcServerWithLogging starts gRPC server with enabled feature logging +func StartGrpcServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + // #DD + //if strings.ToLower(os.Getenv("ENABLE_DATADOG_TRACING")) == "true" { + // tracer.Start(tracer.WithRuntimeMetrics()) + // defer tracer.Stop() + //} + loggingService, err := 
constructLoggingService(fs, writeLoggedFeaturesCallback, loggingOpts) + if err != nil { + return err + } + ser := server.NewGrpcServingServiceServer(fs, loggingService) + log.Info().Msgf("Starting a gRPC server on host %s port %d", host, port) + lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return err + } + + grpcServer := grpc.NewServer() + serving.RegisterServingServiceServer(grpcServer, ser) + healthService := health.NewServer() + grpc_health_v1.RegisterHealthServer(grpcServer, healthService) + + stop := make(chan os.Signal, 1) + signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM) + + go func() { + // As soon as these signals are received from OS, try to gracefully stop the gRPC server + <-stop + log.Info().Msg("Stopping the gRPC server...") + grpcServer.GracefulStop() + if loggingService != nil { + loggingService.Stop() + } + log.Info().Msg("gRPC server terminated") + }() + + return grpcServer.Serve(lis) +} + +// StartHttpServerWithLogging starts HTTP server with enabled feature logging +// Go does not allow direct assignment to package-level functions as a way to +// mock them for tests +func StartHttpServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + loggingService, err := constructLoggingService(fs, writeLoggedFeaturesCallback, loggingOpts) + if err != nil { + return err + } + ser := server.NewHttpServer(fs, loggingService) + log.Info().Msgf("Starting a HTTP server on host %s, port %d", host, port) + + stop := make(chan os.Signal, 1) + signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM) + + go func() { + // As soon as these signals are received from OS, try to gracefully stop the gRPC server + <-stop + log.Info().Msg("Stopping the HTTP server...") + err := ser.Stop() + if err != nil { + log.Error().Err(err).Msg("Error when stopping the HTTP server") + } + if loggingService != nil { + loggingService.Stop() + 
} + log.Info().Msg("HTTP server terminated") + }() + + return ser.Serve(host, port) +} diff --git a/go/main_test.go b/go/main_test.go new file mode 100644 index 00000000000..567a6cf5af4 --- /dev/null +++ b/go/main_test.go @@ -0,0 +1,71 @@ +package main + +import ( + "testing" + + "github.com/feast-dev/feast/go/internal/feast" + "github.com/feast-dev/feast/go/internal/feast/server/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// MockServerStarter is a mock of ServerStarter interface for testing +type MockServerStarter struct { + mock.Mock +} + +func (m *MockServerStarter) StartHttpServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + args := m.Called(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) + return args.Error(0) +} + +func (m *MockServerStarter) StartGrpcServer(fs *feast.FeatureStore, host string, port int, writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback, loggingOpts *logging.LoggingOptions) error { + args := m.Called(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) + return args.Error(0) +} + +// TestStartHttpServer tests the StartHttpServer function +func TestStartHttpServer(t *testing.T) { + mockServerStarter := new(MockServerStarter) + fs := &feast.FeatureStore{} + host := "localhost" + port := 8080 + var writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback + + loggingOpts := &logging.LoggingOptions{} + + mockServerStarter.On("StartHttpServer", fs, host, port, mock.AnythingOfType("logging.OfflineStoreWriteCallback"), loggingOpts).Return(nil) + + err := mockServerStarter.StartHttpServer(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) + assert.NoError(t, err) + mockServerStarter.AssertExpectations(t) +} + +// TestStartGrpcServer tests the StartGrpcServer function +func TestStartGrpcServer(t *testing.T) { + mockServerStarter := new(MockServerStarter) + 
fs := &feast.FeatureStore{} + host := "localhost" + port := 9090 + var writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback + loggingOpts := &logging.LoggingOptions{} + + mockServerStarter.On("StartGrpcServer", fs, host, port, mock.AnythingOfType("logging.OfflineStoreWriteCallback"), loggingOpts).Return(nil) + + err := mockServerStarter.StartGrpcServer(fs, host, port, writeLoggedFeaturesCallback, loggingOpts) + assert.NoError(t, err) + mockServerStarter.AssertExpectations(t) +} + +// TestConstructLoggingService tests the constructLoggingService function +func TestConstructLoggingService(t *testing.T) { + fs := &feast.FeatureStore{} + var writeLoggedFeaturesCallback logging.OfflineStoreWriteCallback + loggingOpts := &logging.LoggingOptions{} + + _, err := constructLoggingService(fs, writeLoggedFeaturesCallback, loggingOpts) + assert.NoError(t, err) + // Further assertions can be added here based on the expected behavior of constructLoggingService +} + +// Note: Additional tests can be written for other functions and error scenarios. 
diff --git a/go/types/typeconversion.go b/go/types/typeconversion.go index 18b4769b4d7..1864fe600ab 100644 --- a/go/types/typeconversion.go +++ b/go/types/typeconversion.go @@ -3,9 +3,9 @@ package types import ( "fmt" - "github.com/apache/arrow/go/v8/arrow" - "github.com/apache/arrow/go/v8/arrow/array" - "github.com/apache/arrow/go/v8/arrow/memory" + "github.com/apache/arrow/go/v17/arrow" + "github.com/apache/arrow/go/v17/arrow/array" + "github.com/apache/arrow/go/v17/arrow/memory" "github.com/feast-dev/feast/go/protos/feast/types" ) diff --git a/go/types/typeconversion_test.go b/go/types/typeconversion_test.go index 4869369c186..c9676cf59f4 100644 --- a/go/types/typeconversion_test.go +++ b/go/types/typeconversion_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/apache/arrow/go/v8/arrow/memory" + "github.com/apache/arrow/go/v17/arrow/memory" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" @@ -47,8 +47,8 @@ var ( {Val: &types.Value_Int32ListVal{&types.Int32List{Val: []int32{3, 4, 5}}}}, }, { - {Val: &types.Value_Int64ListVal{&types.Int64List{Val: []int64{0, 1, 2}}}}, - {Val: &types.Value_Int64ListVal{&types.Int64List{Val: []int64{3, 4, 5}}}}, + {Val: &types.Value_Int64ListVal{&types.Int64List{Val: []int64{0, 1, 2, 553248634761893728}}}}, + {Val: &types.Value_Int64ListVal{&types.Int64List{Val: []int64{3, 4, 5, 553248634761893729}}}}, }, { {Val: &types.Value_FloatListVal{&types.FloatList{Val: []float32{0.5, 1.5, 2}}}}, diff --git a/infra/charts/feast-feature-server/Chart.yaml b/infra/charts/feast-feature-server/Chart.yaml index dd547843d10..d8ed41d2782 100644 --- a/infra/charts/feast-feature-server/Chart.yaml +++ b/infra/charts/feast-feature-server/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: feast-feature-server description: Feast Feature Server in Go or Python type: application -version: 0.41.0 +version: 0.46.0 keywords: - machine learning - big data diff --git a/infra/charts/feast-feature-server/README.md 
b/infra/charts/feast-feature-server/README.md index a36f59d85ea..dc907ab8acf 100644 --- a/infra/charts/feast-feature-server/README.md +++ b/infra/charts/feast-feature-server/README.md @@ -1,6 +1,6 @@ # Feast Python / Go Feature Server Helm Charts -Current chart version is `0.41.0` +Current chart version is `0.46.0` ## Installation @@ -40,7 +40,7 @@ See [here](https://github.com/feast-dev/feast/tree/master/examples/python-helm-d | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"feastdev/feature-server"` | Docker image for Feature Server repository | -| image.tag | string | `"0.41.0"` | The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) | +| image.tag | string | `"0.46.0"` | The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) | | imagePullSecrets | list | `[]` | | | livenessProbe.initialDelaySeconds | int | `30` | | | livenessProbe.periodSeconds | int | `30` | | @@ -56,6 +56,7 @@ See [here](https://github.com/feast-dev/feast/tree/master/examples/python-helm-d | readinessProbe.periodSeconds | int | `10` | | | replicaCount | int | `1` | | | resources | object | `{}` | | +| route.enabled | bool | `false` | | | securityContext | object | `{}` | | | service.port | int | `80` | | | service.type | string | `"ClusterIP"` | | diff --git a/infra/charts/feast-feature-server/templates/route.yaml b/infra/charts/feast-feature-server/templates/route.yaml new file mode 100644 index 00000000000..2f4d36d9e5a --- /dev/null +++ b/infra/charts/feast-feature-server/templates/route.yaml @@ -0,0 +1,18 @@ +{{- if and (.Values.route.enabled) (eq .Values.feast_mode "ui") }} +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: {{ include "feast-feature-server.fullname" . }} + labels: + {{- include "feast-feature-server.labels" . 
| nindent 4 }} +spec: + to: + kind: Service + name: {{ include "feast-feature-server.fullname" . }} + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect +{{- end}} \ No newline at end of file diff --git a/infra/charts/feast-feature-server/values.yaml b/infra/charts/feast-feature-server/values.yaml index d894177558a..db2fbfb28bb 100644 --- a/infra/charts/feast-feature-server/values.yaml +++ b/infra/charts/feast-feature-server/values.yaml @@ -9,7 +9,7 @@ image: repository: feastdev/feature-server pullPolicy: IfNotPresent # image.tag -- The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) - tag: 0.41.0 + tag: 0.46.0 logLevel: "WARNING" # Set log level DEBUG, INFO, WARNING, ERROR, and CRITICAL (case-insensitive) @@ -74,3 +74,7 @@ livenessProbe: readinessProbe: initialDelaySeconds: 20 periodSeconds: 10 + +# to create OpenShift Route object for UI +route: + enabled: false \ No newline at end of file diff --git a/infra/charts/feast/Chart.yaml b/infra/charts/feast/Chart.yaml index a192da89116..d0dfebbf2b5 100644 --- a/infra/charts/feast/Chart.yaml +++ b/infra/charts/feast/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 description: Feature store for machine learning name: feast -version: 0.41.0 +version: 0.46.0 keywords: - machine learning - big data diff --git a/infra/charts/feast/README.md b/infra/charts/feast/README.md index e49fbf6d967..e89c8331d30 100644 --- a/infra/charts/feast/README.md +++ b/infra/charts/feast/README.md @@ -8,7 +8,7 @@ This repo contains Helm charts for Feast Java components that are being installe ## Chart: Feast -Feature store for machine learning Current chart version is `0.41.0` +Feature store for machine learning Current chart version is `0.46.0` ## Installation @@ -65,8 +65,8 @@ See [here](https://github.com/feast-dev/feast/tree/master/examples/java-demo) fo | Repository | Name | Version | |------------|------|---------| | https://charts.helm.sh/stable 
| redis | 10.5.6 | -| https://feast-helm-charts.storage.googleapis.com | feature-server(feature-server) | 0.41.0 | -| https://feast-helm-charts.storage.googleapis.com | transformation-service(transformation-service) | 0.41.0 | +| https://feast-helm-charts.storage.googleapis.com | feature-server(feature-server) | 0.46.0 | +| https://feast-helm-charts.storage.googleapis.com | transformation-service(transformation-service) | 0.46.0 | ## Values diff --git a/infra/charts/feast/charts/feature-server/Chart.yaml b/infra/charts/feast/charts/feature-server/Chart.yaml index 69748a362f0..a4c10bdd5b4 100644 --- a/infra/charts/feast/charts/feature-server/Chart.yaml +++ b/infra/charts/feast/charts/feature-server/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 description: "Feast Feature Server: Online feature serving service for Feast" name: feature-server -version: 0.41.0 -appVersion: v0.41.0 +version: 0.46.0 +appVersion: v0.46.0 keywords: - machine learning - big data diff --git a/infra/charts/feast/charts/feature-server/README.md b/infra/charts/feast/charts/feature-server/README.md index ab77911a8f8..697596b2eb2 100644 --- a/infra/charts/feast/charts/feature-server/README.md +++ b/infra/charts/feast/charts/feature-server/README.md @@ -1,6 +1,6 @@ # feature-server -![Version: 0.41.0](https://img.shields.io/badge/Version-0.41.0-informational?style=flat-square) ![AppVersion: v0.41.0](https://img.shields.io/badge/AppVersion-v0.41.0-informational?style=flat-square) +![Version: 0.46.0](https://img.shields.io/badge/Version-0.46.0-informational?style=flat-square) ![AppVersion: v0.46.0](https://img.shields.io/badge/AppVersion-v0.46.0-informational?style=flat-square) Feast Feature Server: Online feature serving service for Feast @@ -17,7 +17,7 @@ Feast Feature Server: Online feature serving service for Feast | envOverrides | object | `{}` | Extra environment variables to set | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | 
`"feastdev/feature-server-java"` | Docker image for Feature Server repository | -| image.tag | string | `"0.41.0"` | Image tag | +| image.tag | string | `"0.46.0"` | Image tag | | ingress.grpc.annotations | object | `{}` | Extra annotations for the ingress | | ingress.grpc.auth.enabled | bool | `false` | Flag to enable auth | | ingress.grpc.class | string | `"nginx"` | Which ingress controller to use | diff --git a/infra/charts/feast/charts/feature-server/values.yaml b/infra/charts/feast/charts/feature-server/values.yaml index 646d735ef85..48681f83ca0 100644 --- a/infra/charts/feast/charts/feature-server/values.yaml +++ b/infra/charts/feast/charts/feature-server/values.yaml @@ -5,7 +5,7 @@ image: # image.repository -- Docker image for Feature Server repository repository: feastdev/feature-server-java # image.tag -- Image tag - tag: 0.41.0 + tag: 0.46.0 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent diff --git a/infra/charts/feast/charts/transformation-service/Chart.yaml b/infra/charts/feast/charts/transformation-service/Chart.yaml index 6c450852cbf..12123e505e3 100644 --- a/infra/charts/feast/charts/transformation-service/Chart.yaml +++ b/infra/charts/feast/charts/transformation-service/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 description: "Transformation service: to compute on-demand features" name: transformation-service -version: 0.41.0 -appVersion: v0.41.0 +version: 0.46.0 +appVersion: v0.46.0 keywords: - machine learning - big data diff --git a/infra/charts/feast/charts/transformation-service/README.md b/infra/charts/feast/charts/transformation-service/README.md index a00a21f034e..4dfd213bf2e 100644 --- a/infra/charts/feast/charts/transformation-service/README.md +++ b/infra/charts/feast/charts/transformation-service/README.md @@ -1,6 +1,6 @@ # transformation-service -![Version: 0.41.0](https://img.shields.io/badge/Version-0.41.0-informational?style=flat-square) ![AppVersion: 
v0.41.0](https://img.shields.io/badge/AppVersion-v0.41.0-informational?style=flat-square) +![Version: 0.46.0](https://img.shields.io/badge/Version-0.46.0-informational?style=flat-square) ![AppVersion: v0.46.0](https://img.shields.io/badge/AppVersion-v0.46.0-informational?style=flat-square) Transformation service: to compute on-demand features @@ -13,7 +13,7 @@ Transformation service: to compute on-demand features | envOverrides | object | `{}` | Extra environment variables to set | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | `"feastdev/feature-transformation-server"` | Docker image for Transformation Server repository | -| image.tag | string | `"0.41.0"` | Image tag | +| image.tag | string | `"0.46.0"` | Image tag | | nodeSelector | object | `{}` | Node labels for pod assignment | | podLabels | object | `{}` | Labels to be added to Feast Serving pods | | replicaCount | int | `1` | Number of pods that will be created | diff --git a/infra/charts/feast/charts/transformation-service/values.yaml b/infra/charts/feast/charts/transformation-service/values.yaml index 51cd72d6592..3d056d5b25f 100644 --- a/infra/charts/feast/charts/transformation-service/values.yaml +++ b/infra/charts/feast/charts/transformation-service/values.yaml @@ -5,7 +5,7 @@ image: # image.repository -- Docker image for Transformation Server repository repository: feastdev/feature-transformation-server # image.tag -- Image tag - tag: 0.41.0 + tag: 0.46.0 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent diff --git a/infra/charts/feast/requirements.yaml b/infra/charts/feast/requirements.yaml index bb69ee9ed30..11d9026f2df 100644 --- a/infra/charts/feast/requirements.yaml +++ b/infra/charts/feast/requirements.yaml @@ -1,12 +1,12 @@ dependencies: - name: feature-server alias: feature-server - version: 0.41.0 + version: 0.46.0 condition: feature-server.enabled repository: https://feast-helm-charts.storage.googleapis.com - name: 
transformation-service alias: transformation-service - version: 0.41.0 + version: 0.46.0 condition: transformation-service.enabled repository: https://feast-helm-charts.storage.googleapis.com - name: redis diff --git a/infra/feast-helm-operator/Makefile b/infra/feast-helm-operator/Makefile index 733bf7bc3dd..76614ae37af 100644 --- a/infra/feast-helm-operator/Makefile +++ b/infra/feast-helm-operator/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.41.0 +VERSION ?= 0.46.0 # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") diff --git a/infra/feast-helm-operator/README.md b/infra/feast-helm-operator/README.md index ba9fe17fa3c..e6d9caee9f5 100644 --- a/infra/feast-helm-operator/README.md +++ b/infra/feast-helm-operator/README.md @@ -1,4 +1,4 @@ -# Feast Feature Server Helm-based Operator +# Feast Feature Server Helm-based Operator (Deprecated replaced by [feast-operator](../feast-operator/README.md)) This Operator was built with the [operator-sdk](https://github.com/operator-framework/operator-sdk) and leverages the [feast-feature-server helm chart](/infra/charts/feast-feature-server). 
diff --git a/infra/feast-helm-operator/config/manager/kustomization.yaml b/infra/feast-helm-operator/config/manager/kustomization.yaml index decb714a200..bc970e7b408 100644 --- a/infra/feast-helm-operator/config/manager/kustomization.yaml +++ b/infra/feast-helm-operator/config/manager/kustomization.yaml @@ -5,4 +5,4 @@ kind: Kustomization images: - name: controller newName: feastdev/feast-helm-operator - newTag: 0.41.0 + newTag: 0.46.0 diff --git a/infra/feast-operator/.gitignore b/infra/feast-operator/.gitignore index 72df211c0ca..c0c53d8e17e 100644 --- a/infra/feast-operator/.gitignore +++ b/infra/feast-operator/.gitignore @@ -27,4 +27,8 @@ go.work *~ # Installer file generated by Kustomize - skip 'dist/' directories within the Feast project except this one. -!dist/ \ No newline at end of file +!dist/ +bin +bin/ +/bin/ + diff --git a/infra/feast-operator/.golangci.yml b/infra/feast-operator/.golangci.yml index ca69a11f6fd..6c104980d43 100644 --- a/infra/feast-operator/.golangci.yml +++ b/infra/feast-operator/.golangci.yml @@ -16,12 +16,20 @@ issues: linters: - dupl - lll + - path: "test/*" + linters: + - lll + - path: "upgrade/*" + linters: + - lll + - path: "previous-version/*" + linters: + - lll linters: disable-all: true enable: - dupl - errcheck - - exportloopref - goconst - gocyclo - gofmt @@ -32,9 +40,16 @@ linters: - lll - misspell - nakedret + - ginkgolinter - prealloc + - revive - staticcheck - typecheck - unconvert - unparam - unused + +linters-settings: + revive: + rules: + - name: comment-spacings diff --git a/infra/feast-operator/Dockerfile b/infra/feast-operator/Dockerfile index aca26f92295..c7d4f6b696a 100644 --- a/infra/feast-operator/Dockerfile +++ b/infra/feast-operator/Dockerfile @@ -1,9 +1,8 @@ # Build the manager binary -FROM golang:1.21 AS builder +FROM registry.access.redhat.com/ubi8/go-toolset:1.22.9 AS builder ARG TARGETOS ARG TARGETARCH -WORKDIR /workspace # Copy the Go Modules manifests COPY go.mod go.mod COPY go.sum go.sum @@ -23,11 
+22,9 @@ COPY internal/controller/ internal/controller/ # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go -# Use distroless as minimal base image to package the manager binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM registry.access.redhat.com/ubi8/ubi-micro:8.10 WORKDIR / -COPY --from=builder /workspace/manager . +COPY --from=builder /opt/app-root/src/manager . USER 65532:65532 ENTRYPOINT ["/manager"] diff --git a/infra/feast-operator/Makefile b/infra/feast-operator/Makefile index 54786eb5f1a..cde9f87982a 100644 --- a/infra/feast-operator/Makefile +++ b/infra/feast-operator/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.41.0 +VERSION ?= 0.46.0 # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") @@ -48,11 +48,12 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.37.0 +OPERATOR_SDK_VERSION ?= v1.38.0 # Image URL to use all building/pushing image targets IMG ?= $(IMAGE_TAG_BASE):$(VERSION) +FS_IMG ?= docker.io/feastdev/feature-server:$(VERSION) # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
-ENVTEST_K8S_VERSION = 1.29.0 +ENVTEST_K8S_VERSION = 1.30.0 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -96,7 +97,7 @@ help: ## Display this help. .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=manager-role crd:maxDescLen=120 webhook paths="./..." output:crd:artifacts:config=config/crd/bases .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. @@ -111,13 +112,27 @@ vet: ## Run go vet against code. go vet ./... .PHONY: test -test: build-installer fmt vet lint envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out +test: build-installer vet lint envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v test/e2e | grep -v test/data-source-types | grep -v test/upgrade | grep -v test/previous-version) -coverprofile cover.out # Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. test-e2e: - go test ./test/e2e/ -v -ginkgo.v + go test -timeout 60m ./test/e2e/ -v -ginkgo.v + +.PHONY: test-upgrade # Run the upgrade tests against a Kind k8s instance that is spun up. +test-upgrade: + go test -timeout 60m ./test/upgrade/ -v -ginkgo.v + +.PHONY: test-previous-version # Run e2e tests against previous version in a Kind k8s instance that is spun up. 
+test-previous-version: + go test -timeout 60m ./test/previous-version/ -v -ginkgo.v + +# Requires python3 +.PHONY: test-datasources +test-datasources: + python3 test/data-source-types/data-source-types.py + go test ./test/data-source-types/ .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint @@ -142,7 +157,12 @@ run: manifests generate fmt vet ## Run a controller from your host. # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ .PHONY: docker-build docker-build: ## Build docker image with the manager. - $(CONTAINER_TOOL) build -t ${IMG} . + $(CONTAINER_TOOL) build -t ${IMG} --load . + +## Build feast docker image. +.PHONY: feast-ci-dev-docker-img +feast-ci-dev-docker-img: + cd ./../.. && make build-feature-server-dev .PHONY: docker-push docker-push: ## Push docker image with the manager. @@ -161,12 +181,12 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - $(CONTAINER_TOOL) buildx create --name project-v3-builder $(CONTAINER_TOOL) buildx use project-v3-builder - - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross --load . - $(CONTAINER_TOOL) buildx rm project-v3-builder rm Dockerfile.cross .PHONY: build-installer -build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. +build-installer: manifests generate-ref kustomize related-image-fs ## Generate a consolidated YAML with CRDs and deployment. 
mkdir -p dist cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default > dist/install.yaml @@ -186,7 +206,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy -deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. +deploy: manifests kustomize related-image-fs ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - @@ -197,22 +217,27 @@ undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/. ##@ Dependencies ## Location to install dependencies to -LOCALBIN ?= $(shell pwd)/bin +LOCALDIR ?= $(shell pwd) +LOCALBIN ?= $(LOCALDIR)/bin $(LOCALBIN): mkdir -p $(LOCALBIN) ## Tool Binaries KUBECTL ?= kubectl -KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) -ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +CRD_REF_DOCS ?= $(LOCALBIN)/crd-ref-docs +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint +ENVSUBST = $(LOCALBIN)/envsubst ## Tool Versions -KUSTOMIZE_VERSION ?= v5.3.0 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 -ENVTEST_VERSION ?= release-0.17 -GOLANGCI_LINT_VERSION ?= v1.57.2 +KUSTOMIZE_VERSION ?= v5.4.2 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 +CRD_REF_DOCS_VERSION ?= v0.1.0 +ENVTEST_VERSION ?= release-0.18 +GOLANGCI_LINT_VERSION ?= v1.59.1 +ENVSUBST_VERSION ?= v1.4.2 .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. 
@@ -232,20 +257,28 @@ $(ENVTEST): $(LOCALBIN) .PHONY: golangci-lint golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. $(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +.PHONY: envsubst +envsubst: $(ENVSUBST) ## Download envsubst locally if necessary. +$(ENVSUBST): $(LOCALBIN) + $(call go-install-tool,$(ENVSUBST),github.com/a8m/envsubst/cmd/envsubst,$(ENVSUBST_VERSION)) # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist # $1 - target path with name of binary (ideally with version) # $2 - package url which can be installed # $3 - specific version of package define go-install-tool -@[ -f $(1) ] || { \ +@[ -f "$(1)-$(3)" ] || { \ + echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ set -e; \ package=$(2)@$(3) ;\ echo "Downloading $${package}" ;\ GOBIN=$(LOCALBIN) go install $${package} ;\ -mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ -} +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) endef .PHONY: operator-sdk @@ -265,8 +298,17 @@ OPERATOR_SDK = $(shell which operator-sdk) endif endif +.PHONY: crd-ref-docs +crd-ref-docs: $(CRD_REF_DOCS) ## Download crd-ref-docs locally if necessary. 
+$(CRD_REF_DOCS): $(LOCALBIN) + $(call go-install-tool,$(CRD_REF_DOCS),github.com/elastic/crd-ref-docs,$(CRD_REF_DOCS_VERSION)) + +.PHONY: generate-ref +generate-ref: generate fmt crd-ref-docs + $(CRD_REF_DOCS) --log-level=WARN --max-depth=30 --config=$(LOCALDIR)/docs/crd-ref-templates/config.yaml --source-path=$(LOCALDIR)/api/v1alpha1 --renderer=markdown --templates-dir=$(LOCALDIR)/docs/crd-ref-templates/markdown --output-path=$(LOCALDIR)/docs/api/markdown/ref.md + .PHONY: bundle -bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. +bundle: manifests kustomize related-image-fs operator-sdk ## Generate bundle manifests and metadata, then validate generated files. $(OPERATOR_SDK) generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) @@ -320,3 +362,7 @@ catalog-build: opm ## Build a catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) + +.PHONY: related-image-fs +related-image-fs: envsubst + FS_IMG=$(FS_IMG) $(ENVSUBST) < config/default/related_image_fs_patch.tmpl > config/default/related_image_fs_patch.yaml diff --git a/infra/feast-operator/README.md b/infra/feast-operator/README.md index 32e2ef11b53..b4c3ed6565b 100644 --- a/infra/feast-operator/README.md +++ b/infra/feast-operator/README.md @@ -1,10 +1,12 @@ # Feast Operator This is a K8s Operator that can be used to deploy and manage **Feast**, an open source feature store for machine learning. +### **[FeatureStore CR API Reference](docs/api/markdown/ref.md)** + ## Getting Started ### Prerequisites -- go version v1.21.0+ +- go version v1.22 - docker version 17.03+. - kubectl version v1.11.3+. - Access to a Kubernetes v1.11.3+ cluster. 
@@ -108,8 +110,8 @@ make deploy IMG=/feast-operator: ``` ### Prerequisites -- go version v1.21 -- operator-sdk version v1.37.0 +- go version v1.22 +- operator-sdk version v1.38.0 **NOTE:** Run `make help` for more information on all potential `make` targets @@ -131,3 +133,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + +## Running End-to-End integration tests on local(dev) environment +You need a kind cluster to run the e2e tests on local(dev) environment. + +```shell +# Default kind cluster configuration is not enough to run all the pods. In my case i was using docker with colima. kind uses the cpi and memory assigned to docker. +# below memory configuration worked well but if you are using other docker runtime then please increase the cpu and memory. +colima start --cpu 10 --memory 15 --disk 100 + +# create the kind cluster +kind create cluster + +# set kubernetes context to the recently created kind cluster +kubectl cluster-info --context kind-kind + +# run the command from operator directory to run e2e tests. +make test-e2e + +# delete cluster once you are done. +kind delete cluster +``` diff --git a/infra/feast-operator/api/feastversion/version.go b/infra/feast-operator/api/feastversion/version.go index 77a9db1d57f..a0e99c98056 100644 --- a/infra/feast-operator/api/feastversion/version.go +++ b/infra/feast-operator/api/feastversion/version.go @@ -16,5 +16,5 @@ limitations under the License. package feastversion -// Feast release version -const FeastVersion = "0.41.0" +// Feast release version. 
Keep on line #20, this is critical to release CI +const FeastVersion = "0.46.0" diff --git a/infra/feast-operator/api/v1alpha1/featurestore_types.go b/infra/feast-operator/api/v1alpha1/featurestore_types.go index 87e1cd64841..1d00163d61b 100644 --- a/infra/feast-operator/api/v1alpha1/featurestore_types.go +++ b/infra/feast-operator/api/v1alpha1/featurestore_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -28,26 +29,34 @@ const ( FailedPhase = "Failed" // Feast condition types: - ClientReadyType = "Client" - OfflineStoreReadyType = "OfflineStore" - OnlineStoreReadyType = "OnlineStore" - RegistryReadyType = "Registry" - ReadyType = "FeatureStore" + ClientReadyType = "Client" + OfflineStoreReadyType = "OfflineStore" + OnlineStoreReadyType = "OnlineStore" + RegistryReadyType = "Registry" + UIReadyType = "UI" + ReadyType = "FeatureStore" + AuthorizationReadyType = "Authorization" // Feast condition reasons: - ReadyReason = "Ready" - FailedReason = "FeatureStoreFailed" - OfflineStoreFailedReason = "OfflineStoreDeploymentFailed" - OnlineStoreFailedReason = "OnlineStoreDeploymentFailed" - RegistryFailedReason = "RegistryDeploymentFailed" - ClientFailedReason = "ClientDeploymentFailed" + ReadyReason = "Ready" + FailedReason = "FeatureStoreFailed" + DeploymentNotAvailableReason = "DeploymentNotAvailable" + OfflineStoreFailedReason = "OfflineStoreDeploymentFailed" + OnlineStoreFailedReason = "OnlineStoreDeploymentFailed" + RegistryFailedReason = "RegistryDeploymentFailed" + UIFailedReason = "UIDeploymentFailed" + ClientFailedReason = "ClientDeploymentFailed" + KubernetesAuthzFailedReason = "KubernetesAuthorizationDeploymentFailed" // Feast condition messages: - ReadyMessage = "FeatureStore installation complete" - OfflineStoreReadyMessage = "Offline Store installation complete" - OnlineStoreReadyMessage = "Online Store installation 
complete" - RegistryReadyMessage = "Registry installation complete" - ClientReadyMessage = "Client installation complete" + ReadyMessage = "FeatureStore installation complete" + OfflineStoreReadyMessage = "Offline Store installation complete" + OnlineStoreReadyMessage = "Online Store installation complete" + RegistryReadyMessage = "Registry installation complete" + UIReadyMessage = "UI installation complete" + ClientReadyMessage = "Client installation complete" + KubernetesAuthzReadyMessage = "Kubernetes authorization installation complete" + DeploymentNotAvailableMessage = "Deployment is not available" // entity_key_serialization_version SerializationVersion = 3 @@ -57,30 +66,231 @@ const ( type FeatureStoreSpec struct { // +kubebuilder:validation:Pattern="^[A-Za-z0-9][A-Za-z0-9_]*$" // FeastProject is the Feast project id. This can be any alphanumeric string with underscores, but it cannot start with an underscore. Required. - FeastProject string `json:"feastProject"` - Services *FeatureStoreServices `json:"services,omitempty"` + FeastProject string `json:"feastProject"` + FeastProjectDir *FeastProjectDir `json:"feastProjectDir,omitempty"` + Services *FeatureStoreServices `json:"services,omitempty"` + AuthzConfig *AuthzConfig `json:"authz,omitempty"` } -// FeatureStoreServices defines the desired feast service deployments. ephemeral registry is deployed by default. +// FeastProjectDir defines how to create the feast project directory. +// +kubebuilder:validation:XValidation:rule="[has(self.git), has(self.init)].exists_one(c, c)",message="One selection required between init or git." +type FeastProjectDir struct { + Git *GitCloneOptions `json:"git,omitempty"` + Init *FeastInitOptions `json:"init,omitempty"` +} + +// GitCloneOptions describes how a clone should be performed. +// +kubebuilder:validation:XValidation:rule="has(self.featureRepoPath) ? !self.featureRepoPath.startsWith('/') : true",message="RepoPath must be a file name only, with no slashes." 
+type GitCloneOptions struct { + // The repository URL to clone from. + URL string `json:"url"` + // Reference to a branch / tag / commit + Ref string `json:"ref,omitempty"` + // Configs passed to git via `-c` + // e.g. http.sslVerify: 'false' + // OR 'url."https://api:\${TOKEN}@github.com/".insteadOf': 'https://github.com/' + Configs map[string]string `json:"configs,omitempty"` + // FeatureRepoPath is the relative path to the feature repo subdirectory. Default is 'feature_repo'. + FeatureRepoPath string `json:"featureRepoPath,omitempty"` + Env *[]corev1.EnvVar `json:"env,omitempty"` + EnvFrom *[]corev1.EnvFromSource `json:"envFrom,omitempty"` +} + +// FeastInitOptions defines how to run a `feast init`. +type FeastInitOptions struct { + Minimal bool `json:"minimal,omitempty"` + // Template for the created project + // +kubebuilder:validation:Enum=local;gcp;aws;snowflake;spark;postgres;hbase;cassandra;hazelcast;ikv;couchbase + Template string `json:"template,omitempty"` +} + +// FeatureStoreServices defines the desired feast services. An ephemeral onlineStore feature server is deployed by default. type FeatureStoreServices struct { OfflineStore *OfflineStore `json:"offlineStore,omitempty"` OnlineStore *OnlineStore `json:"onlineStore,omitempty"` Registry *Registry `json:"registry,omitempty"` + // Creates a UI server container + UI *ServerConfigs `json:"ui,omitempty"` + DeploymentStrategy *appsv1.DeploymentStrategy `json:"deploymentStrategy,omitempty"` + // Disable the 'feast repo initialization' initContainer + DisableInitContainers bool `json:"disableInitContainers,omitempty"` + // Volumes specifies the volumes to mount in the FeatureStore deployment. A corresponding `VolumeMount` should be added to whichever feast service(s) require access to said volume(s). 
+ Volumes []corev1.Volume `json:"volumes,omitempty"` } -// OfflineStore configures the deployed offline store service +// OfflineStore configures the offline store service type OfflineStore struct { - ServiceConfigs `json:",inline"` + // Creates a remote offline server container + Server *ServerConfigs `json:"server,omitempty"` + Persistence *OfflineStorePersistence `json:"persistence,omitempty"` +} + +// OfflineStorePersistence configures the persistence settings for the offline store service +// +kubebuilder:validation:XValidation:rule="[has(self.file), has(self.store)].exists_one(c, c)",message="One selection required between file or store." +type OfflineStorePersistence struct { + FilePersistence *OfflineStoreFilePersistence `json:"file,omitempty"` + DBPersistence *OfflineStoreDBStorePersistence `json:"store,omitempty"` +} + +// OfflineStoreFilePersistence configures the file-based persistence for the offline store service +type OfflineStoreFilePersistence struct { + // +kubebuilder:validation:Enum=file;dask;duckdb + Type string `json:"type,omitempty"` + PvcConfig *PvcConfig `json:"pvc,omitempty"` +} + +var ValidOfflineStoreFilePersistenceTypes = []string{ + "dask", + "duckdb", + "file", +} + +// OfflineStoreDBStorePersistence configures the DB store persistence for the offline store service +type OfflineStoreDBStorePersistence struct { + // Type of the persistence type you want to use. + // +kubebuilder:validation:Enum=snowflake.offline;bigquery;redshift;spark;postgres;trino;athena;mssql;couchbase.offline + Type string `json:"type"` + // Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. 
+ SecretRef corev1.LocalObjectReference `json:"secretRef"` + // By default, the selected store "type" is used as the SecretKeyName + SecretKeyName string `json:"secretKeyName,omitempty"` +} + +var ValidOfflineStoreDBStorePersistenceTypes = []string{ + "snowflake.offline", + "bigquery", + "redshift", + "spark", + "postgres", + "trino", + "athena", + "mssql", + "couchbase.offline", } -// OnlineStore configures the deployed online store service +// OnlineStore configures the online store service type OnlineStore struct { - ServiceConfigs `json:",inline"` + // Creates a feature server container + Server *ServerConfigs `json:"server,omitempty"` + Persistence *OnlineStorePersistence `json:"persistence,omitempty"` +} + +// OnlineStorePersistence configures the persistence settings for the online store service +// +kubebuilder:validation:XValidation:rule="[has(self.file), has(self.store)].exists_one(c, c)",message="One selection required between file or store." +type OnlineStorePersistence struct { + FilePersistence *OnlineStoreFilePersistence `json:"file,omitempty"` + DBPersistence *OnlineStoreDBStorePersistence `json:"store,omitempty"` +} + +// OnlineStoreFilePersistence configures the file-based persistence for the online store service +// +kubebuilder:validation:XValidation:rule="(!has(self.pvc) && has(self.path)) ? self.path.startsWith('/') : true",message="Ephemeral stores must have absolute paths." +// +kubebuilder:validation:XValidation:rule="(has(self.pvc) && has(self.path)) ? !self.path.startsWith('/') : true",message="PVC path must be a file name only, with no slashes." +// +kubebuilder:validation:XValidation:rule="has(self.path) ? !(self.path.startsWith('s3://') || self.path.startsWith('gs://')) : true",message="Online store does not support S3 or GS buckets." 
+type OnlineStoreFilePersistence struct { + Path string `json:"path,omitempty"` + PvcConfig *PvcConfig `json:"pvc,omitempty"` } -// LocalRegistryConfig configures the deployed registry service +// OnlineStoreDBStorePersistence configures the DB store persistence for the online store service +type OnlineStoreDBStorePersistence struct { + // Type of the persistence type you want to use. + // +kubebuilder:validation:Enum=snowflake.online;redis;ikv;datastore;dynamodb;bigtable;postgres;cassandra;mysql;hazelcast;singlestore;hbase;elasticsearch;qdrant;couchbase.online;milvus + Type string `json:"type"` + // Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. + SecretRef corev1.LocalObjectReference `json:"secretRef"` + // By default, the selected store "type" is used as the SecretKeyName + SecretKeyName string `json:"secretKeyName,omitempty"` +} + +var ValidOnlineStoreDBStorePersistenceTypes = []string{ + "snowflake.online", + "redis", + "ikv", + "datastore", + "dynamodb", + "bigtable", + "postgres", + "cassandra", + "mysql", + "hazelcast", + "singlestore", + "hbase", + "elasticsearch", + "qdrant", + "couchbase.online", + "milvus", +} + +// LocalRegistryConfig configures the registry service type LocalRegistryConfig struct { - ServiceConfigs `json:",inline"` + // Creates a registry server container + Server *ServerConfigs `json:"server,omitempty"` + Persistence *RegistryPersistence `json:"persistence,omitempty"` +} + +// RegistryPersistence configures the persistence settings for the registry service +// +kubebuilder:validation:XValidation:rule="[has(self.file), has(self.store)].exists_one(c, c)",message="One selection required between file or store." 
+type RegistryPersistence struct { + FilePersistence *RegistryFilePersistence `json:"file,omitempty"` + DBPersistence *RegistryDBStorePersistence `json:"store,omitempty"` +} + +// RegistryFilePersistence configures the file-based persistence for the registry service +// +kubebuilder:validation:XValidation:rule="(!has(self.pvc) && has(self.path)) ? (self.path.startsWith('/') || self.path.startsWith('s3://') || self.path.startsWith('gs://')) : true",message="Registry files must use absolute paths or be S3 ('s3://') or GS ('gs://') object store URIs." +// +kubebuilder:validation:XValidation:rule="(has(self.pvc) && has(self.path)) ? !self.path.startsWith('/') : true",message="PVC path must be a file name only, with no slashes." +// +kubebuilder:validation:XValidation:rule="(has(self.pvc) && has(self.path)) ? !(self.path.startsWith('s3://') || self.path.startsWith('gs://')) : true",message="PVC persistence does not support S3 or GS object store URIs." +// +kubebuilder:validation:XValidation:rule="(has(self.s3_additional_kwargs) && has(self.path)) ? self.path.startsWith('s3://') : true",message="Additional S3 settings are available only for S3 object store URIs." +type RegistryFilePersistence struct { + Path string `json:"path,omitempty"` + PvcConfig *PvcConfig `json:"pvc,omitempty"` + S3AdditionalKwargs *map[string]string `json:"s3_additional_kwargs,omitempty"` +} + +// RegistryDBStorePersistence configures the DB store persistence for the registry service +type RegistryDBStorePersistence struct { + // Type of the persistence type you want to use. + // +kubebuilder:validation:Enum=sql;snowflake.registry + Type string `json:"type"` + // Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. 
+ SecretRef corev1.LocalObjectReference `json:"secretRef"` + // By default, the selected store "type" is used as the SecretKeyName + SecretKeyName string `json:"secretKeyName,omitempty"` +} + +var ValidRegistryDBStorePersistenceTypes = []string{ + "sql", + "snowflake.registry", +} + +// PvcConfig defines the settings for a persistent file store based on PVCs. +// We can refer to an existing PVC using the `Ref` field, or create a new one using the `Create` field. +// +kubebuilder:validation:XValidation:rule="[has(self.ref), has(self.create)].exists_one(c, c)",message="One selection is required between ref and create." +// +kubebuilder:validation:XValidation:rule="self.mountPath.matches('^/[^:]*$')",message="Mount path must start with '/' and must not contain ':'" +type PvcConfig struct { + // Reference to an existing field + Ref *corev1.LocalObjectReference `json:"ref,omitempty"` + // Settings for creating a new PVC + Create *PvcCreate `json:"create,omitempty"` + // MountPath within the container at which the volume should be mounted. + // Must start by "/" and cannot contain ':'. + MountPath string `json:"mountPath"` +} + +// PvcCreate defines the immutable settings to create a new PVC mounted at the given path. +// The PVC name is the same as the associated deployment & feast service name. +// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="PvcCreate is immutable" +type PvcCreate struct { + // AccessModes k8s persistent volume access modes. Defaults to ["ReadWriteOnce"]. + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // StorageClassName is the name of an existing StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass and the cluster default will be used. + StorageClassName *string `json:"storageClassName,omitempty"` + // Resources describes the storage resource requirements for a volume. 
+ // Default requested storage size depends on the associated service: + // - 10Gi for offline store + // - 5Gi for online store + // - 5Gi for registry + Resources corev1.VolumeResourceRequirements `json:"resources,omitempty"` } // Registry configures the registry service. One selection is required. Local is the default setting. @@ -97,7 +307,8 @@ type RemoteRegistryConfig struct { // Host address of the remote registry service - :, e.g. `registry..svc.cluster.local:80` Hostname *string `json:"hostname,omitempty"` // Reference to an existing `FeatureStore` CR in the same k8s cluster. - FeastRef *FeatureStoreRef `json:"feastRef,omitempty"` + FeastRef *FeatureStoreRef `json:"feastRef,omitempty"` + TLS *TlsRemoteRegistryConfigs `json:"tls,omitempty"` } // FeatureStoreRef defines which existing FeatureStore's registry should be used @@ -108,35 +319,114 @@ type FeatureStoreRef struct { Namespace string `json:"namespace,omitempty"` } -// ServiceConfigs k8s container settings -type ServiceConfigs struct { - DefaultConfigs `json:",inline"` - OptionalConfigs `json:",inline"` +// ServerConfigs creates a server for the feast service, with specified container configurations. +type ServerConfigs struct { + ContainerConfigs `json:",inline"` + TLS *TlsConfigs `json:"tls,omitempty"` + // LogLevel sets the logging level for the server + // Allowed values: "debug", "info", "warning", "error", "critical". + // +kubebuilder:validation:Enum=debug;info;warning;error;critical + LogLevel *string `json:"logLevel,omitempty"` + // VolumeMounts defines the list of volumes that should be mounted into the feast container. + // This allows attaching persistent storage, config files, secrets, or other resources + // required by the Feast components. Ensure that each volume mount has a corresponding + // volume definition in the Volumes field. 
+ VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` } -// DefaultConfigs k8s container settings that are applied by default -type DefaultConfigs struct { +// ContainerConfigs k8s container settings for the server +type ContainerConfigs struct { + DefaultCtrConfigs `json:",inline"` + OptionalCtrConfigs `json:",inline"` +} + +// DefaultCtrConfigs k8s container settings that are applied by default +type DefaultCtrConfigs struct { Image *string `json:"image,omitempty"` } -// OptionalConfigs k8s container settings that are optional -type OptionalConfigs struct { +// OptionalCtrConfigs k8s container settings that are optional +type OptionalCtrConfigs struct { Env *[]corev1.EnvVar `json:"env,omitempty"` + EnvFrom *[]corev1.EnvFromSource `json:"envFrom,omitempty"` ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` Resources *corev1.ResourceRequirements `json:"resources,omitempty"` } +// AuthzConfig defines the authorization settings for the deployed Feast services. +// +kubebuilder:validation:XValidation:rule="[has(self.kubernetes), has(self.oidc)].exists_one(c, c)",message="One selection required between kubernetes or oidc." +type AuthzConfig struct { + KubernetesAuthz *KubernetesAuthz `json:"kubernetes,omitempty"` + OidcAuthz *OidcAuthz `json:"oidc,omitempty"` +} + +// KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. +// https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +type KubernetesAuthz struct { + // The Kubernetes RBAC roles to be deployed in the same namespace of the FeatureStore. + // Roles are managed by the operator and created with an empty list of rules. + // See the Feast permission model at https://docs.feast.dev/getting-started/concepts/permission + // The feature store admin is not obligated to manage roles using the Feast operator, roles can be managed independently. + // This configuration option is only providing a way to automate this procedure. 
+ // Important note: the operator cannot ensure that these roles will match the ones used in the configured Feast permissions. + Roles []string `json:"roles,omitempty"` +} + +// OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. +// https://auth0.com/docs/authenticate/protocols/openid-connect-protocol +type OidcAuthz struct { + SecretRef corev1.LocalObjectReference `json:"secretRef"` +} + +// TlsConfigs configures server TLS for a feast service. in an openshift cluster, this is configured by default using service serving certificates. +// +kubebuilder:validation:XValidation:rule="(!has(self.disable) || !self.disable) ? has(self.secretRef) : true",message="`secretRef` required if `disable` is false." +type TlsConfigs struct { + // references the local k8s secret where the TLS key and cert reside + SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + SecretKeyNames SecretKeyNames `json:"secretKeyNames,omitempty"` + // will disable TLS for the feast service. useful in an openshift cluster, for example, where TLS is configured by default + Disable *bool `json:"disable,omitempty"` +} + +// `secretRef` required if `disable` is false. +func (tls *TlsConfigs) IsTLS() bool { + if tls != nil { + if tls.Disable != nil && *tls.Disable { + return false + } else if tls.SecretRef == nil { + return false + } + return true + } + return false +} + +// TlsRemoteRegistryConfigs configures client TLS for a remote feast registry. in an openshift cluster, this is configured by default when the remote feast registry is using service serving certificates. +type TlsRemoteRegistryConfigs struct { + // references the local k8s configmap where the TLS cert resides + ConfigMapRef corev1.LocalObjectReference `json:"configMapRef"` + // defines the configmap key name for the client TLS cert. + CertName string `json:"certName"` +} + +// SecretKeyNames defines the secret key names for the TLS key and cert. 
+type SecretKeyNames struct { + // defaults to "tls.crt" + TlsCrt string `json:"tlsCrt,omitempty"` + // defaults to "tls.key" + TlsKey string `json:"tlsKey,omitempty"` +} + // FeatureStoreStatus defines the observed state of FeatureStore type FeatureStoreStatus struct { // Shows the currently applied feast configuration, including any pertinent defaults Applied FeatureStoreSpec `json:"applied,omitempty"` // ConfigMap in this namespace containing a client `feature_store.yaml` for this feast deployment - ClientConfigMap string `json:"clientConfigMap,omitempty"` - Conditions []metav1.Condition `json:"conditions,omitempty"` - // Version of feast that's currently deployed - FeastVersion string `json:"feastVersion,omitempty"` - Phase string `json:"phase,omitempty"` - ServiceHostnames ServiceHostnames `json:"serviceHostnames,omitempty"` + ClientConfigMap string `json:"clientConfigMap,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` + FeastVersion string `json:"feastVersion,omitempty"` + Phase string `json:"phase,omitempty"` + ServiceHostnames ServiceHostnames `json:"serviceHostnames,omitempty"` } // ServiceHostnames defines the service hostnames in the format of :, e.g. 
example.svc.cluster.local:80 @@ -144,13 +434,14 @@ type ServiceHostnames struct { OfflineStore string `json:"offlineStore,omitempty"` OnlineStore string `json:"onlineStore,omitempty"` Registry string `json:"registry,omitempty"` + UI string `json:"ui,omitempty"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:shortName=feast -//+kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.phase` -//+kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=feast +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // FeatureStore is the Schema for the featurestores API type FeatureStore struct { @@ -161,7 +452,7 @@ type FeatureStore struct { Status FeatureStoreStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // FeatureStoreList contains a list of FeatureStore type FeatureStoreList struct { diff --git a/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go b/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go index f37c8942ad2..87e5b7164af 100644 --- a/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -21,13 +21,56 @@ limitations under the License. package v1alpha1 import ( + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DefaultConfigs) DeepCopyInto(out *DefaultConfigs) { +func (in *AuthzConfig) DeepCopyInto(out *AuthzConfig) { + *out = *in + if in.KubernetesAuthz != nil { + in, out := &in.KubernetesAuthz, &out.KubernetesAuthz + *out = new(KubernetesAuthz) + (*in).DeepCopyInto(*out) + } + if in.OidcAuthz != nil { + in, out := &in.OidcAuthz, &out.OidcAuthz + *out = new(OidcAuthz) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthzConfig. +func (in *AuthzConfig) DeepCopy() *AuthzConfig { + if in == nil { + return nil + } + out := new(AuthzConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerConfigs) DeepCopyInto(out *ContainerConfigs) { + *out = *in + in.DefaultCtrConfigs.DeepCopyInto(&out.DefaultCtrConfigs) + in.OptionalCtrConfigs.DeepCopyInto(&out.OptionalCtrConfigs) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfigs. +func (in *ContainerConfigs) DeepCopy() *ContainerConfigs { + if in == nil { + return nil + } + out := new(ContainerConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultCtrConfigs) DeepCopyInto(out *DefaultCtrConfigs) { *out = *in if in.Image != nil { in, out := &in.Image, &out.Image @@ -36,12 +79,52 @@ func (in *DefaultConfigs) DeepCopyInto(out *DefaultConfigs) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultConfigs. -func (in *DefaultConfigs) DeepCopy() *DefaultConfigs { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultCtrConfigs. 
+func (in *DefaultCtrConfigs) DeepCopy() *DefaultCtrConfigs { if in == nil { return nil } - out := new(DefaultConfigs) + out := new(DefaultCtrConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeastInitOptions) DeepCopyInto(out *FeastInitOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeastInitOptions. +func (in *FeastInitOptions) DeepCopy() *FeastInitOptions { + if in == nil { + return nil + } + out := new(FeastInitOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeastProjectDir) DeepCopyInto(out *FeastProjectDir) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitCloneOptions) + (*in).DeepCopyInto(*out) + } + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(FeastInitOptions) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeastProjectDir. 
+func (in *FeastProjectDir) DeepCopy() *FeastProjectDir { + if in == nil { + return nil + } + out := new(FeastProjectDir) in.DeepCopyInto(out) return out } @@ -138,6 +221,23 @@ func (in *FeatureStoreServices) DeepCopyInto(out *FeatureStoreServices) { *out = new(Registry) (*in).DeepCopyInto(*out) } + if in.UI != nil { + in, out := &in.UI, &out.UI + *out = new(ServerConfigs) + (*in).DeepCopyInto(*out) + } + if in.DeploymentStrategy != nil { + in, out := &in.DeploymentStrategy, &out.DeploymentStrategy + *out = new(appsv1.DeploymentStrategy) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStoreServices. @@ -153,11 +253,21 @@ func (in *FeatureStoreServices) DeepCopy() *FeatureStoreServices { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureStoreSpec) DeepCopyInto(out *FeatureStoreSpec) { *out = *in + if in.FeastProjectDir != nil { + in, out := &in.FeastProjectDir, &out.FeastProjectDir + *out = new(FeastProjectDir) + (*in).DeepCopyInto(*out) + } if in.Services != nil { in, out := &in.Services, &out.Services *out = new(FeatureStoreServices) (*in).DeepCopyInto(*out) } + if in.AuthzConfig != nil { + in, out := &in.AuthzConfig, &out.AuthzConfig + *out = new(AuthzConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStoreSpec. @@ -194,10 +304,83 @@ func (in *FeatureStoreStatus) DeepCopy() *FeatureStoreStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitCloneOptions) DeepCopyInto(out *GitCloneOptions) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = new([]v1.EnvVar) + if **in != nil { + in, out := *in, *out + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = new([]v1.EnvFromSource) + if **in != nil { + in, out := *in, *out + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitCloneOptions. +func (in *GitCloneOptions) DeepCopy() *GitCloneOptions { + if in == nil { + return nil + } + out := new(GitCloneOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesAuthz) DeepCopyInto(out *KubernetesAuthz) { + *out = *in + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAuthz. +func (in *KubernetesAuthz) DeepCopy() *KubernetesAuthz { + if in == nil { + return nil + } + out := new(KubernetesAuthz) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LocalRegistryConfig) DeepCopyInto(out *LocalRegistryConfig) { *out = *in - in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(ServerConfigs) + (*in).DeepCopyInto(*out) + } + if in.Persistence != nil { + in, out := &in.Persistence, &out.Persistence + *out = new(RegistryPersistence) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalRegistryConfig. @@ -213,7 +396,16 @@ func (in *LocalRegistryConfig) DeepCopy() *LocalRegistryConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OfflineStore) DeepCopyInto(out *OfflineStore) { *out = *in - in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(ServerConfigs) + (*in).DeepCopyInto(*out) + } + if in.Persistence != nil { + in, out := &in.Persistence, &out.Persistence + *out = new(OfflineStorePersistence) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStore. @@ -226,10 +418,96 @@ func (in *OfflineStore) DeepCopy() *OfflineStore { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineStoreDBStorePersistence) DeepCopyInto(out *OfflineStoreDBStorePersistence) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStoreDBStorePersistence. +func (in *OfflineStoreDBStorePersistence) DeepCopy() *OfflineStoreDBStorePersistence { + if in == nil { + return nil + } + out := new(OfflineStoreDBStorePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OfflineStoreFilePersistence) DeepCopyInto(out *OfflineStoreFilePersistence) { + *out = *in + if in.PvcConfig != nil { + in, out := &in.PvcConfig, &out.PvcConfig + *out = new(PvcConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStoreFilePersistence. +func (in *OfflineStoreFilePersistence) DeepCopy() *OfflineStoreFilePersistence { + if in == nil { + return nil + } + out := new(OfflineStoreFilePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineStorePersistence) DeepCopyInto(out *OfflineStorePersistence) { + *out = *in + if in.FilePersistence != nil { + in, out := &in.FilePersistence, &out.FilePersistence + *out = new(OfflineStoreFilePersistence) + (*in).DeepCopyInto(*out) + } + if in.DBPersistence != nil { + in, out := &in.DBPersistence, &out.DBPersistence + *out = new(OfflineStoreDBStorePersistence) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineStorePersistence. +func (in *OfflineStorePersistence) DeepCopy() *OfflineStorePersistence { + if in == nil { + return nil + } + out := new(OfflineStorePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OidcAuthz) DeepCopyInto(out *OidcAuthz) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OidcAuthz. +func (in *OidcAuthz) DeepCopy() *OidcAuthz { + if in == nil { + return nil + } + out := new(OidcAuthz) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OnlineStore) DeepCopyInto(out *OnlineStore) { *out = *in - in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(ServerConfigs) + (*in).DeepCopyInto(*out) + } + if in.Persistence != nil { + in, out := &in.Persistence, &out.Persistence + *out = new(OnlineStorePersistence) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStore. @@ -243,7 +521,68 @@ func (in *OnlineStore) DeepCopy() *OnlineStore { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OptionalConfigs) DeepCopyInto(out *OptionalConfigs) { +func (in *OnlineStoreDBStorePersistence) DeepCopyInto(out *OnlineStoreDBStorePersistence) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStoreDBStorePersistence. +func (in *OnlineStoreDBStorePersistence) DeepCopy() *OnlineStoreDBStorePersistence { + if in == nil { + return nil + } + out := new(OnlineStoreDBStorePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnlineStoreFilePersistence) DeepCopyInto(out *OnlineStoreFilePersistence) { + *out = *in + if in.PvcConfig != nil { + in, out := &in.PvcConfig, &out.PvcConfig + *out = new(PvcConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStoreFilePersistence. +func (in *OnlineStoreFilePersistence) DeepCopy() *OnlineStoreFilePersistence { + if in == nil { + return nil + } + out := new(OnlineStoreFilePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OnlineStorePersistence) DeepCopyInto(out *OnlineStorePersistence) { + *out = *in + if in.FilePersistence != nil { + in, out := &in.FilePersistence, &out.FilePersistence + *out = new(OnlineStoreFilePersistence) + (*in).DeepCopyInto(*out) + } + if in.DBPersistence != nil { + in, out := &in.DBPersistence, &out.DBPersistence + *out = new(OnlineStoreDBStorePersistence) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineStorePersistence. +func (in *OnlineStorePersistence) DeepCopy() *OnlineStorePersistence { + if in == nil { + return nil + } + out := new(OnlineStorePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionalCtrConfigs) DeepCopyInto(out *OptionalCtrConfigs) { *out = *in if in.Env != nil { in, out := &in.Env, &out.Env @@ -256,6 +595,17 @@ func (in *OptionalConfigs) DeepCopyInto(out *OptionalConfigs) { } } } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = new([]v1.EnvFromSource) + if **in != nil { + in, out := *in, *out + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } if in.ImagePullPolicy != nil { in, out := &in.ImagePullPolicy, &out.ImagePullPolicy *out = new(v1.PullPolicy) @@ -268,12 +618,63 @@ func (in *OptionalConfigs) DeepCopyInto(out *OptionalConfigs) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalConfigs. -func (in *OptionalConfigs) DeepCopy() *OptionalConfigs { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalCtrConfigs. 
+func (in *OptionalCtrConfigs) DeepCopy() *OptionalCtrConfigs { + if in == nil { + return nil + } + out := new(OptionalCtrConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PvcConfig) DeepCopyInto(out *PvcConfig) { + *out = *in + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(PvcCreate) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PvcConfig. +func (in *PvcConfig) DeepCopy() *PvcConfig { + if in == nil { + return nil + } + out := new(PvcConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PvcCreate) DeepCopyInto(out *PvcCreate) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]v1.PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PvcCreate. +func (in *PvcCreate) DeepCopy() *PvcCreate { if in == nil { return nil } - out := new(OptionalConfigs) + out := new(PvcCreate) in.DeepCopyInto(out) return out } @@ -303,6 +704,78 @@ func (in *Registry) DeepCopy() *Registry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryDBStorePersistence) DeepCopyInto(out *RegistryDBStorePersistence) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryDBStorePersistence. +func (in *RegistryDBStorePersistence) DeepCopy() *RegistryDBStorePersistence { + if in == nil { + return nil + } + out := new(RegistryDBStorePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryFilePersistence) DeepCopyInto(out *RegistryFilePersistence) { + *out = *in + if in.PvcConfig != nil { + in, out := &in.PvcConfig, &out.PvcConfig + *out = new(PvcConfig) + (*in).DeepCopyInto(*out) + } + if in.S3AdditionalKwargs != nil { + in, out := &in.S3AdditionalKwargs, &out.S3AdditionalKwargs + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryFilePersistence. +func (in *RegistryFilePersistence) DeepCopy() *RegistryFilePersistence { + if in == nil { + return nil + } + out := new(RegistryFilePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryPersistence) DeepCopyInto(out *RegistryPersistence) { + *out = *in + if in.FilePersistence != nil { + in, out := &in.FilePersistence, &out.FilePersistence + *out = new(RegistryFilePersistence) + (*in).DeepCopyInto(*out) + } + if in.DBPersistence != nil { + in, out := &in.DBPersistence, &out.DBPersistence + *out = new(RegistryDBStorePersistence) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPersistence. 
+func (in *RegistryPersistence) DeepCopy() *RegistryPersistence { + if in == nil { + return nil + } + out := new(RegistryPersistence) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteRegistryConfig) DeepCopyInto(out *RemoteRegistryConfig) { *out = *in @@ -316,6 +789,11 @@ func (in *RemoteRegistryConfig) DeepCopyInto(out *RemoteRegistryConfig) { *out = new(FeatureStoreRef) **out = **in } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TlsRemoteRegistryConfigs) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteRegistryConfig. @@ -329,18 +807,49 @@ func (in *RemoteRegistryConfig) DeepCopy() *RemoteRegistryConfig { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceConfigs) DeepCopyInto(out *ServiceConfigs) { +func (in *SecretKeyNames) DeepCopyInto(out *SecretKeyNames) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeyNames. +func (in *SecretKeyNames) DeepCopy() *SecretKeyNames { + if in == nil { + return nil + } + out := new(SecretKeyNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerConfigs) DeepCopyInto(out *ServerConfigs) { *out = *in - in.DefaultConfigs.DeepCopyInto(&out.DefaultConfigs) - in.OptionalConfigs.DeepCopyInto(&out.OptionalConfigs) + in.ContainerConfigs.DeepCopyInto(&out.ContainerConfigs) + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TlsConfigs) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConfigs. -func (in *ServiceConfigs) DeepCopy() *ServiceConfigs { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfigs. +func (in *ServerConfigs) DeepCopy() *ServerConfigs { if in == nil { return nil } - out := new(ServiceConfigs) + out := new(ServerConfigs) in.DeepCopyInto(out) return out } @@ -359,3 +868,45 @@ func (in *ServiceHostnames) DeepCopy() *ServiceHostnames { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TlsConfigs) DeepCopyInto(out *TlsConfigs) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } + out.SecretKeyNames = in.SecretKeyNames + if in.Disable != nil { + in, out := &in.Disable, &out.Disable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TlsConfigs. 
+func (in *TlsConfigs) DeepCopy() *TlsConfigs { + if in == nil { + return nil + } + out := new(TlsConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TlsRemoteRegistryConfigs) DeepCopyInto(out *TlsRemoteRegistryConfigs) { + *out = *in + out.ConfigMapRef = in.ConfigMapRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TlsRemoteRegistryConfigs. +func (in *TlsRemoteRegistryConfigs) DeepCopy() *TlsRemoteRegistryConfigs { + if in == nil { + return nil + } + out := new(TlsRemoteRegistryConfigs) + in.DeepCopyInto(out) + return out +} diff --git a/infra/feast-operator/bundle.Dockerfile b/infra/feast-operator/bundle.Dockerfile index ab3f14a9da4..685b137b92a 100644 --- a/infra/feast-operator/bundle.Dockerfile +++ b/infra/feast-operator/bundle.Dockerfile @@ -6,7 +6,7 @@ LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ LABEL operators.operatorframework.io.bundle.package.v1=feast-operator LABEL operators.operatorframework.io.bundle.channels.v1=alpha -LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.37.0 +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.38.0 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 diff --git a/infra/feast-operator/bundle/manifests/feast-operator-controller-manager-metrics-service_v1_service.yaml b/infra/feast-operator/bundle/manifests/feast-operator-controller-manager-metrics-service_v1_service.yaml index e0cd9dc2545..913517e198a 100644 --- a/infra/feast-operator/bundle/manifests/feast-operator-controller-manager-metrics-service_v1_service.yaml +++ b/infra/feast-operator/bundle/manifests/feast-operator-controller-manager-metrics-service_v1_service.yaml @@ -12,7 
+12,7 @@ spec: - name: https port: 8443 protocol: TCP - targetPort: https + targetPort: 8443 selector: control-plane: controller-manager status: diff --git a/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml b/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml index 245db443581..734508cfecb 100644 --- a/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml +++ b/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml @@ -13,13 +13,34 @@ metadata: "spec": { "feastProject": "my_project" } + }, + { + "apiVersion": "feast.dev/v1alpha1", + "kind": "FeatureStore", + "metadata": { + "name": "sample-all-servers" + }, + "spec": { + "feastProject": "my_project", + "services": { + "offlineStore": { + "server": {} + }, + "registry": { + "local": { + "server": {} + } + }, + "ui": {} + } + } } ] capabilities: Basic Install - createdAt: "2024-11-01T13:05:11Z" - operators.operatorframework.io/builder: operator-sdk-v1.37.0 + createdAt: "2025-02-17T22:19:00Z" + operators.operatorframework.io/builder: operator-sdk-v1.38.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 - name: feast-operator.v0.41.0 + name: feast-operator.v0.46.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -54,6 +75,8 @@ spec: - "" resources: - configmaps + - persistentvolumeclaims + - serviceaccounts - services verbs: - create @@ -62,6 +85,13 @@ spec: - list - update - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list - apiGroups: - feast.dev resources: @@ -88,6 +118,29 @@ spec: - get - patch - update + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - authentication.k8s.io resources: @@ -122,35 +175,15 
@@ spec: spec: containers: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --metrics-bind-address=:8443 - --leader-elect + - --health-probe-bind-address=:8081 command: - /manager - image: feastdev/feast-operator:0.41.0 + env: + - name: RELATED_IMAGE_FEATURE_SERVER + value: docker.io/feastdev/feature-server:0.46.0 + image: feastdev/feast-operator:0.46.0 livenessProbe: httpGet: path: /healthz @@ -239,4 +272,7 @@ spec: provider: name: Feast Community url: https://lf-aidata.atlassian.net/wiki/spaces/FEAST/ - version: 0.41.0 + relatedImages: + - image: docker.io/feastdev/feature-server:0.46.0 + name: feature-server + version: 0.46.0 diff --git a/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml b/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml index 2142e093eb1..003babbccca 100644 --- a/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml +++ b/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 creationTimestamp: null name: featurestores.feast.dev spec: @@ -29,516 +29,849 @@ spec: description: FeatureStore is the Schema for the featurestores API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: APIVersion defines the versioned schema of this representation + of an object. type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: Kind is a string value representing the REST resource this + object represents. type: string metadata: type: object spec: description: FeatureStoreSpec defines the desired state of FeatureStore properties: + authz: + description: AuthzConfig defines the authorization settings for the + deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed in the + same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, c)' feastProject: - description: FeastProject is the Feast project id. This can be any - alphanumeric string with underscores, but it cannot start with an - underscore. Required. + description: FeastProject is the Feast project id. pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ type: string services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be unavailable + during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. 
+ type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean offlineStore: - description: OfflineStore configures the deployed offline store - service + description: OfflineStore configures the offline store service properties: - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures the + file-based persistence for the offline store service + properties: + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - key: - description: The key to select. - type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: - description: Specify whether the ConfigMap or - its key must be defined + description: Specify whether the ConfigMap must + be defined type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource type: object x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: - description: Specify whether the Secret or its - key must be defined + description: Specify whether the Secret must + be defined type: boolean - required: - - key type: object x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image - type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. 
- properties: - claims: + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. type: string required: + - mountPath - name type: object type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object type: object type: object onlineStore: - description: OnlineStore configures the deployed online store - service + description: OnlineStore configures the online store service properties: - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures the + file-based persistence for the online store service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? 
self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with no + slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - key: - description: The key to select. - type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
type: string optional: - description: Specify whether the ConfigMap or - its key must be defined + description: Specify whether the ConfigMap must + be defined type: boolean - required: - - key type: object x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. 
- type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: - description: Specify whether the Secret or its - key must be defined + description: Specify whether the Secret must + be defined type: boolean - required: - - key type: object x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image - type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - registry: - description: Registry configures the registry service. One selection - is required. Local is the default setting. - properties: - local: - description: LocalRegistryConfig configures the deployed registry - service - properties: - env: - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. 
- properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. 
Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object type: array image: type: string @@ -546,6 +879,17 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -554,13 +898,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -568,8 +905,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. type: string required: - name @@ -587,7 +923,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. 
type: object requests: additionalProperties: @@ -596,651 +932,5598 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. type: object type: object - type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. - properties: - feastRef: - description: Reference to an existing `FeatureStore` CR - in the same k8s cluster. + tls: + description: TlsConfigs configures server TLS for a feast + service. properties: - name: - description: Name of the FeatureStore - type: string - namespace: - description: Namespace of the FeatureStore - type: string - required: - - name + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' type: object - x-kubernetes-validations: - - message: One selection required. 
- rule: '[has(self.local), has(self.remote)].exists_one(c, c)' - type: object - required: - - feastProject - type: object - status: - description: FeatureStoreStatus defines the observed state of FeatureStore - properties: - applied: - description: Shows the currently applied feast configuration, including - any pertinent defaults - properties: - feastProject: - description: FeastProject is the Feast project id. This can be - any alphanumeric string with underscores, but it cannot start - with an underscore. Required. - pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ - type: string - services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. + registry: + description: Registry configures the registry service. One selection + is required. Local is the default setting. properties: - offlineStore: - description: OfflineStore configures the deployed offline - store service + local: + description: LocalRegistryConfig configures the registry service properties: - env: - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. 
+ persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures the + file-based persistence for the registry service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object store + URIs. + rule: '(!has(self.pvc) && has(self.path)) ? (self.path.startsWith(''/'') + || self.path.startsWith(''s3://'') || self.path.startsWith(''gs://'')) + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 or + GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available only + for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - key: - description: The key to select. - type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
type: string optional: description: Specify whether the ConfigMap - or its key must be defined + must be defined type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource type: object x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. 
- type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret - or its key must be defined + must be defined type: boolean - required: - - key type: object x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - onlineStore: - description: OnlineStore configures the deployed online store - service - properties: - env: - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. 
+ LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. properties: - key: - description: The key to select. - type: string name: description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic - fieldRef: + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. 
+ properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: + Path within the container at which the volume should be mounted. Must + not contain ':'. 
+ type: string + mountPropagation: description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image + type: array + type: object + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. + properties: + feastRef: + description: Reference to an existing `FeatureStore` CR + in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. + certName: + description: defines the configmap key name for the + client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap where + the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount of + compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. useful + in an openshift cluster, for example, where TLS is configured + by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key names + for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where the + TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes that + should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the FeatureStore + deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is + the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is + empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default.' 
+ format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver that + handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone PVC + to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may be + used to set the VolumeAttributesClass used + by this claim. + type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. 
+ type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide identifiers + (wwids)\nEither wwids or combination of targetWWNs + and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. 
+ type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. 
+ type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. 
+ type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set + permissions on created files by default. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod to + access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. 
+ Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default.' 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. 
+ type: string + volumeNamespace: + description: volumeNamespace specifies the scope of + the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + status: + description: FeatureStoreStatus defines the observed state of FeatureStore + properties: + applied: + description: Shows the currently applied feast configuration, including + any pertinent defaults + properties: + authz: + description: AuthzConfig defines the authorization settings for + the deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed + in the same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, + c)' + feastProject: + description: FeastProject is the Feast project id. + pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ + type: string + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be + unavailable during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or + "RollingUpdate". Default is RollingUpdate. 
+ type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service + properties: + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures + the file-based persistence for the offline store + service + properties: + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. 
+ type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. 
+ rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures + the file-based persistence for the online store + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. 
+ type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS + buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? 
has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One + selection is required. Local is the default setting. + properties: + local: + description: LocalRegistryConfig configures the registry + service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures + the file-based persistence for the registry + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings + for a persistent file store based on PVCs. 
+ properties: + create: + description: Settings for creating a new + PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to + ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the + storage resource requirements for + a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes + the minimum amount of compute + resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the + name of an existing StorageClass + to which this persistent volume + belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing + field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between + ref and create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' + and must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object + store URIs. + rule: '(!has(self.pvc) && has(self.path)) ? + (self.path.startsWith(''/'') || self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: PVC path must be a file name only, + with no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 + or GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available + only for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store + "type" is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should + be placed as-is from the "feature_store.yaml" + under the secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type + you want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or + store. 
+ rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS + for a feast service. + properties: + disable: + description: will disable TLS for the feast + service. useful in an openshift cluster, + for example, where TLS is configured by + default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret + where the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` + is false.' + rule: '(!has(self.disable) || !self.disable) + ? has(self.secretRef) : true' + volumeMounts: + description: VolumeMounts defines the list of + volumes that should be mounted into the feast + container. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of + a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. + properties: + feastRef: + description: Reference to an existing `FeatureStore` + CR in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for + the client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap + where the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, + c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. 
+ properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the + FeatureStore deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty.' + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone + PVC to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may + be used to set the VolumeAttributesClass + used by this claim. + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. 
+ type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide + identifiers (wwids)\nEither wwids or combination + of targetWWNs and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. 
+ type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. 
+ type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. 
+ type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod + to access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. 
+ Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. properties: name: + default: "" description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - required: - - name type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. 
Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - registry: - description: Registry configures the registry service. One - selection is required. Local is the default setting. - properties: - local: - description: LocalRegistryConfig configures the deployed - registry service - properties: - env: + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer items: - description: EnvVar represents an environment variable - present in a Container. + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: + default: "" description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - valueFrom: - description: Source for the environment variable's - value. 
Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to - select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. 
- properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. - properties: - feastRef: - description: Reference to an existing `FeatureStore` - CR in the same k8s cluster. 
- properties: - name: - description: Name of the FeatureStore - type: string - namespace: - description: Namespace of the FeatureStore - type: string - required: - - name - type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string - type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' - type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.local), has(self.remote)].exists_one(c, - c)' + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array type: object required: - feastProject @@ -1251,21 +6534,12 @@ spec: type: string conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. format: date-time type: string message: @@ -1277,18 +6551,13 @@ spec: observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + For instance, if . 
format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -1304,9 +6573,7 @@ spec: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + Many .condition. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1319,7 +6586,6 @@ spec: type: object type: array feastVersion: - description: Version of feast that's currently deployed type: string phase: type: string @@ -1333,6 +6599,8 @@ spec: type: string registry: type: string + ui: + type: string type: object type: object type: object diff --git a/infra/feast-operator/bundle/metadata/annotations.yaml b/infra/feast-operator/bundle/metadata/annotations.yaml index bf929b9755b..5e280a43e24 100644 --- a/infra/feast-operator/bundle/metadata/annotations.yaml +++ b/infra/feast-operator/bundle/metadata/annotations.yaml @@ -5,7 +5,7 @@ annotations: operators.operatorframework.io.bundle.metadata.v1: metadata/ operators.operatorframework.io.bundle.package.v1: feast-operator operators.operatorframework.io.bundle.channels.v1: alpha - operators.operatorframework.io.metrics.builder: operator-sdk-v1.37.0 + 
operators.operatorframework.io.metrics.builder: operator-sdk-v1.38.0 operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 diff --git a/infra/feast-operator/cmd/main.go b/infra/feast-operator/cmd/main.go index e132a6a3c9c..82f0fd2eeca 100644 --- a/infra/feast-operator/cmd/main.go +++ b/infra/feast-operator/cmd/main.go @@ -33,12 +33,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + routev1 "github.com/openshift/api/route/v1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller" - //+kubebuilder:scaffold:imports + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" + // +kubebuilder:scaffold:imports ) var ( @@ -48,9 +52,9 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - + utilruntime.Must(routev1.AddToScheme(scheme)) utilruntime.Must(feastdevv1alpha1.AddToScheme(scheme)) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme } func main() { @@ -59,13 +63,15 @@ func main() { var probeAddr string var secureMetrics bool var enableHTTP2 bool - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. 
"+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") - flag.BoolVar(&secureMetrics, "metrics-secure", false, - "If set the metrics endpoint is served securely") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") flag.BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") opts := zap.Options{ @@ -87,7 +93,6 @@ func main() { c.NextProtos = []string{"http/1.1"} } - tlsOpts := []func(*tls.Config){} if !enableHTTP2 { tlsOpts = append(tlsOpts, disableHTTP2) } @@ -96,13 +101,33 @@ func main() { TLSOpts: tlsOpts, }) + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. 
Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - Metrics: metricsserver.Options{ - BindAddress: metricsAddr, - SecureServing: secureMetrics, - TLSOpts: tlsOpts, - }, + Scheme: scheme, + Metrics: metricsServerOptions, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, @@ -132,6 +157,8 @@ func main() { os.Exit(1) } + services.SetIsOpenShift(mgr.GetConfig()) + if err = (&controller.FeatureStoreReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), @@ -139,7 +166,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "FeatureStore") os.Exit(1) } - //+kubebuilder:scaffold:builder + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") diff --git a/infra/feast-operator/config/component_metadata.yaml b/infra/feast-operator/config/component_metadata.yaml new file mode 100644 index 00000000000..cbe44e473af --- /dev/null +++ b/infra/feast-operator/config/component_metadata.yaml @@ -0,0 +1,5 @@ +# This file is required to configure Feast release information for ODH/RHOAI Operator +releases: + - name: Feast + version: 0.46.0 + repoUrl: https://github.com/feast-dev/feast diff --git 
a/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml b/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml index b4c17b5eb80..4bb7227f856 100644 --- a/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml +++ b/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: featurestores.feast.dev spec: group: feast.dev @@ -29,39 +29,79 @@ spec: description: FeatureStore is the Schema for the featurestores API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: APIVersion defines the versioned schema of this representation + of an object. type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: Kind is a string value representing the REST resource this + object represents. type: string metadata: type: object spec: description: FeatureStoreSpec defines the desired state of FeatureStore properties: + authz: + description: AuthzConfig defines the authorization settings for the + deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. 
+ properties: + roles: + description: The Kubernetes RBAC roles to be deployed in the + same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, c)' feastProject: - description: FeastProject is the Feast project id. This can be any - alphanumeric string with underscores, but it cannot start with an - underscore. Required. + description: FeastProject is the Feast project id. pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ type: string - services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. properties: - offlineStore: - description: OfflineStore configures the deployed offline store - service + git: + description: GitCloneOptions describes how a clone should be performed. properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". 
+ type: object env: items: description: EnvVar represents an environment variable present @@ -75,13 +115,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's value. @@ -94,10 +128,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap or @@ -108,9 +143,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' properties: apiVersion: description: Version of the schema the FieldPath @@ -127,7 +162,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for volumes, @@ -157,10 +192,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret or its @@ -175,256 +211,268 @@ spec: - name type: object type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image - type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: name: + default: "" description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
type: string - required: - - name + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - onlineStore: - description: OnlineStore configures the deployed online store - service - properties: - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
- "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + secretRef: + description: The Secret to select from properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: + name: + default: "" description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean type: object - required: - - name + x-kubernetes-map-type: atomic type: object type: array - image: + featureRepoPath: + description: FeatureRepoPath is the relative path to the feature + repo subdirectory. Default is 'feature_repo'. type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image + ref: + description: Reference to a branch / tag / commit type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. + url: + description: The repository URL to clone from. 
+ type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? !self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast init`. + properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true + maxSurge: + anyOf: + - type: integer + - type: string description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be unavailable + during the update. + x-kubernetes-int-or-string: true type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string type: object - registry: - description: Registry configures the registry service. One selection - is required. Local is the default setting. 
+ disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service properties: - local: - description: LocalRegistryConfig configures the deployed registry - service + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures the + file-based persistence for the offline store service + properties: + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. 
+ type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. 
+ rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a remote offline server container properties: env: items: @@ -439,13 +487,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's @@ -458,10 +500,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap @@ -472,9 +515,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' 
properties: apiVersion: description: Version of the schema the FieldPath @@ -491,7 +534,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for @@ -522,10 +565,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret @@ -540,12 +584,66 @@ spec: - name type: object type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array image: type: string imagePullPolicy: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -554,13 +652,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -568,8 +659,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. type: string required: - name @@ -587,7 +677,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. type: object requests: additionalProperties: @@ -596,69 +686,248 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. 
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service properties: - feastRef: - description: Reference to an existing `FeatureStore` CR - in the same k8s cluster. + file: + description: OnlineStoreFilePersistence configures the + file-based persistence for the online store service properties: - name: - description: Name of the FeatureStore + path: type: string - namespace: - description: Namespace of the FeatureStore + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. 
+ items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. 
+ rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with no + slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus type: string required: - - name + - secretRef + - type type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string type: object x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' - type: object - x-kubernetes-validations: - - message: One selection required. 
- rule: '[has(self.local), has(self.remote)].exists_one(c, c)' - type: object - required: - - feastProject - type: object - status: - description: FeatureStoreStatus defines the observed state of FeatureStore - properties: - applied: - description: Shows the currently applied feast configuration, including - any pertinent defaults - properties: - feastProject: - description: FeastProject is the Feast project id. This can be - any alphanumeric string with underscores, but it cannot start - with an underscore. Required. - pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ - type: string - services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. - properties: - offlineStore: - description: OfflineStore configures the deployed offline - store service + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a feature server container properties: env: items: @@ -673,13 +942,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's @@ -692,10 +955,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap @@ -706,9 +970,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' properties: apiVersion: description: Version of the schema the FieldPath @@ -725,7 +989,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for @@ -756,10 +1020,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret @@ -774,12 +1039,66 @@ spec: - name type: object type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array image: type: string imagePullPolicy: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -788,13 +1107,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -802,8 +1114,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. type: string required: - name @@ -821,7 +1132,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. type: object requests: additionalProperties: @@ -830,228 +1141,271 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. type: object type: object - type: object - onlineStore: - description: OnlineStore configures the deployed online store - service - properties: - env: + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? 
has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. items: - description: EnvVar represents an environment variable - present in a Container. + description: VolumeMount describes a mounting of a Volume + within a container. properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: This must match the Name of a Volume. type: string - value: + readOnly: description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. 
+ type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One selection + is required. Local is the default setting. + properties: + local: + description: LocalRegistryConfig configures the registry service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures the + file-based persistence for the registry service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. 
+ type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object store + URIs. + rule: '(!has(self.pvc) && has(self.path)) ? (self.path.startsWith(''/'') + || self.path.startsWith(''s3://'') || self.path.startsWith(''gs://'')) + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 or + GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available only + for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? 
self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
+ type: string + value: description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. 
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - registry: - description: Registry configures the registry service. One - selection is required. Local is the default setting. - properties: - local: - description: LocalRegistryConfig configures the deployed - registry service - properties: - env: - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: configMapKeyRef: description: Selects a key of a ConfigMap. @@ -1060,10 +1414,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap @@ -1074,9 +1429,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' properties: apiVersion: description: Version of the schema the @@ -1094,7 +1449,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required @@ -1127,10 +1482,11 @@ spec: key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret @@ -1145,12 +1501,66 @@ spec: - name type: object type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array image: type: string imagePullPolicy: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -1159,13 +1569,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1173,8 +1576,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. 
type: string required: - name @@ -1192,7 +1594,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. type: object requests: additionalProperties: @@ -1201,97 +1603,5385 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string type: object + x-kubernetes-map-type: atomic type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' 
+ rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. + properties: + feastRef: + description: Reference to an existing `FeatureStore` CR + in the same k8s cluster. properties: - feastRef: - description: Reference to an existing `FeatureStore` - CR in the same k8s cluster. 
+ name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for the + client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap where + the TLS cert resides properties: name: - description: Name of the FeatureStore - type: string - namespace: - description: Namespace of the FeatureStore + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - required: - - name type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' type: object x-kubernetes-validations: - message: One selection required. - rule: '[has(self.local), has(self.remote)].exists_one(c, + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, c)' type: object - required: - - feastProject - type: object - clientConfigMap: - description: ConfigMap in this namespace containing a client `feature_store.yaml` - for this feast deployment - type: string - conditions: - items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + x-kubernetes-validations: + - message: One selection required. 
+ rule: '[has(self.local), has(self.remote)].exists_one(c, c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount of + compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. useful + in an openshift cluster, for example, where TLS is configured + by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key names + for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where the + TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes that + should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the FeatureStore + deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is + the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is + empty.' + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver that + handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone PVC + to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may be + used to set the VolumeAttributesClass used + by this claim. + type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. 
+ type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide identifiers + (wwids)\nEither wwids or combination of targetWWNs + and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. 
+ type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. 
+ type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. 
+ type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set + permissions on created files by default. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod to + access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. 
+ Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default.' 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. 
+ type: string + volumeNamespace: + description: volumeNamespace specifies the scope of + the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + status: + description: FeatureStoreStatus defines the observed state of FeatureStore + properties: + applied: + description: Shows the currently applied feast configuration, including + any pertinent defaults + properties: + authz: + description: AuthzConfig defines the authorization settings for + the deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed + in the same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, + c)' + feastProject: + description: FeastProject is the Feast project id. + pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ + type: string + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. + properties: + git: + description: GitCloneOptions describes how a clone should + be performed. + properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". + type: object + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + featureRepoPath: + description: FeatureRepoPath is the relative path to the + feature repo subdirectory. Default is 'feature_repo'. + type: string + ref: + description: Reference to a branch / tag / commit + type: string + url: + description: The repository URL to clone from. + type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? !self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast + init`. 
+ properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be + unavailable during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or + "RollingUpdate". Default is RollingUpdate. + type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service + properties: + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures + the file-based persistence for the offline store + service + properties: + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. 
+ properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. 
+ properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' 
+ rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures + the file-based persistence for the online store + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. 
Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS + buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. 
+ properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One + selection is required. Local is the default setting. + properties: + local: + description: LocalRegistryConfig configures the registry + service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures + the file-based persistence for the registry + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings + for a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new + PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to + ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the + storage resource requirements for + a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes + the minimum amount of compute + resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the + name of an existing StorageClass + to which this persistent volume + belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing + field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between + ref and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' + and must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object + store URIs. + rule: '(!has(self.pvc) && has(self.path)) ? + (self.path.startsWith(''/'') || self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: PVC path must be a file name only, + with no slashes. + rule: '(has(self.pvc) && has(self.path)) ? 
!self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 + or GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available + only for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store + "type" is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should + be placed as-is from the "feature_store.yaml" + under the secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type + you want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or + store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS + for a feast service. + properties: + disable: + description: will disable TLS for the feast + service. useful in an openshift cluster, + for example, where TLS is configured by + default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret + where the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` + is false.' + rule: '(!has(self.disable) || !self.disable) + ? has(self.secretRef) : true' + volumeMounts: + description: VolumeMounts defines the list of + volumes that should be mounted into the feast + container. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of + a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. + properties: + feastRef: + description: Reference to an existing `FeatureStore` + CR in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. 
`registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for + the client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap + where the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, + c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' 
+ rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the + FeatureStore deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. 
+ If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). 
ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone + PVC to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. 
+ type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. 
+ type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may + be used to set the VolumeAttributesClass + used by this claim. + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide + identifiers (wwids)\nEither wwids or combination + of targetWWNs and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. 
+ type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. 
+ Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. 
+ type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod + to access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
+ type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. 
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. 
+ type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. 
+ type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + clientConfigMap: + description: ConfigMap in this namespace containing a client `feature_store.yaml` + for this feast deployment + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if . + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. @@ -1304,9 +6994,7 @@ spec: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + Many .condition. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1319,7 +7007,6 @@ spec: type: object type: array feastVersion: - description: Version of feast that's currently deployed type: string phase: type: string @@ -1333,6 +7020,8 @@ spec: type: string registry: type: string + ui: + type: string type: object type: object type: object diff --git a/infra/feast-operator/config/default/kustomization.yaml b/infra/feast-operator/config/default/kustomization.yaml index 957965b9b35..ca573154247 100644 --- a/infra/feast-operator/config/default/kustomization.yaml +++ b/infra/feast-operator/config/default/kustomization.yaml @@ -25,12 +25,20 @@ resources: #- ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml + patches: -# Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics -# endpoint w/o any authn/z, please comment the following line. -- path: manager_auth_proxy_patch.yaml +- path: related_image_fs_patch.yaml + target: + kind: Deployment +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. 
+# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml diff --git a/infra/feast-operator/config/default/manager_auth_proxy_patch.yaml b/infra/feast-operator/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 4c3c27602f5..00000000000 --- a/infra/feast-operator/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" - ports: - - containerPort: 8443 - protocol: TCP - name: https - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - - name: manager - args: - - "--health-probe-bind-address=:8081" - - "--metrics-bind-address=127.0.0.1:8080" - - "--leader-elect" diff --git a/infra/feast-operator/config/default/manager_metrics_patch.yaml b/infra/feast-operator/config/default/manager_metrics_patch.yaml new file mode 100644 index 00000000000..2aaef6536f4 --- /dev/null +++ b/infra/feast-operator/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/infra/feast-operator/config/rbac/auth_proxy_service.yaml 
b/infra/feast-operator/config/default/metrics_service.yaml similarity index 94% rename from infra/feast-operator/config/rbac/auth_proxy_service.yaml rename to infra/feast-operator/config/default/metrics_service.yaml index c2bf4e37939..0207c0469d4 100644 --- a/infra/feast-operator/config/rbac/auth_proxy_service.yaml +++ b/infra/feast-operator/config/default/metrics_service.yaml @@ -12,6 +12,6 @@ spec: - name: https port: 8443 protocol: TCP - targetPort: https + targetPort: 8443 selector: control-plane: controller-manager diff --git a/infra/feast-operator/config/default/related_image_fs_patch.tmpl b/infra/feast-operator/config/default/related_image_fs_patch.tmpl new file mode 100644 index 00000000000..f3508836a86 --- /dev/null +++ b/infra/feast-operator/config/default/related_image_fs_patch.tmpl @@ -0,0 +1,5 @@ +- op: replace + path: "/spec/template/spec/containers/0/env/0" + value: + name: RELATED_IMAGE_FEATURE_SERVER + value: ${FS_IMG} diff --git a/infra/feast-operator/config/default/related_image_fs_patch.yaml b/infra/feast-operator/config/default/related_image_fs_patch.yaml new file mode 100644 index 00000000000..890e4a8f45c --- /dev/null +++ b/infra/feast-operator/config/default/related_image_fs_patch.yaml @@ -0,0 +1,5 @@ +- op: replace + path: "/spec/template/spec/containers/0/env/0" + value: + name: RELATED_IMAGE_FEATURE_SERVER + value: docker.io/feastdev/feature-server:0.46.0 diff --git a/infra/feast-operator/config/manager/kustomization.yaml b/infra/feast-operator/config/manager/kustomization.yaml index 253475b945b..bdf2ea9398c 100644 --- a/infra/feast-operator/config/manager/kustomization.yaml +++ b/infra/feast-operator/config/manager/kustomization.yaml @@ -5,4 +5,4 @@ kind: Kustomization images: - name: controller newName: feastdev/feast-operator - newTag: 0.41.0 + newTag: 0.46.0 diff --git a/infra/feast-operator/config/manager/manager.yaml b/infra/feast-operator/config/manager/manager.yaml index 90ef7b48635..e7550f0db7f 100644 --- 
a/infra/feast-operator/config/manager/manager.yaml +++ b/infra/feast-operator/config/manager/manager.yaml @@ -62,6 +62,7 @@ spec: - /manager args: - --leader-elect + - --health-probe-bind-address=:8081 image: controller:latest name: manager securityContext: @@ -69,6 +70,9 @@ spec: capabilities: drop: - "ALL" + env: + - name: RELATED_IMAGE_FEATURE_SERVER + value: feast:latest livenessProbe: httpGet: path: /healthz diff --git a/infra/feast-operator/config/overlays/odh/delete-namespace.yaml b/infra/feast-operator/config/overlays/odh/delete-namespace.yaml new file mode 100644 index 00000000000..9a52c0573de --- /dev/null +++ b/infra/feast-operator/config/overlays/odh/delete-namespace.yaml @@ -0,0 +1,5 @@ +$patch: delete +apiVersion: v1 +kind: Namespace +metadata: + name: system diff --git a/infra/feast-operator/config/overlays/odh/kustomization.yaml b/infra/feast-operator/config/overlays/odh/kustomization.yaml new file mode 100644 index 00000000000..508757e76fb --- /dev/null +++ b/infra/feast-operator/config/overlays/odh/kustomization.yaml @@ -0,0 +1,44 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: opendatahub + + +resources: + - ../../default + + +patches: + # patch to remove default `system` namespace in ../../manager/manager.yaml + - path: delete-namespace.yaml + +configMapGenerator: + - name: feast-operator-parameters + envs: + - params.env + +configurations: + - params.yaml + +replacements: + - source: + kind: ConfigMap + name: feast-operator-parameters + version: v1 + fieldPath: data.RELATED_IMAGE_FEAST_OPERATOR + targets: + - select: + kind: Deployment + name: controller-manager + fieldPaths: + - spec.template.spec.containers.[name=manager].image + - source: + kind: ConfigMap + name: feast-operator-parameters + fieldPath: data.RELATED_IMAGE_FEATURE_SERVER + targets: + - select: + kind: Deployment + name: controller-manager + fieldPaths: + - 
spec.template.spec.containers.[name=manager].env.[name=RELATED_IMAGE_FEATURE_SERVER].value diff --git a/infra/feast-operator/config/overlays/odh/params.env b/infra/feast-operator/config/overlays/odh/params.env new file mode 100644 index 00000000000..3e846e9ccc6 --- /dev/null +++ b/infra/feast-operator/config/overlays/odh/params.env @@ -0,0 +1,2 @@ +RELATED_IMAGE_FEAST_OPERATOR=docker.io/feastdev/feast-operator:0.46.0 +RELATED_IMAGE_FEATURE_SERVER=docker.io/feastdev/feature-server:0.46.0 \ No newline at end of file diff --git a/infra/feast-operator/config/overlays/odh/params.yaml b/infra/feast-operator/config/overlays/odh/params.yaml new file mode 100644 index 00000000000..43509ff293c --- /dev/null +++ b/infra/feast-operator/config/overlays/odh/params.yaml @@ -0,0 +1,3 @@ +varReference: + - path: spec/template/spec/containers[]/image + kind: Deployment diff --git a/infra/feast-operator/config/prometheus/monitor.yaml b/infra/feast-operator/config/prometheus/monitor.yaml index 55484079677..e76479a1305 100644 --- a/infra/feast-operator/config/prometheus/monitor.yaml +++ b/infra/feast-operator/config/prometheus/monitor.yaml @@ -11,10 +11,19 @@ metadata: spec: endpoints: - path: /metrics - port: https + port: https # Ensure this is the name of the port that exposes HTTPS metrics scheme: https bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. 
+ # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key insecureSkipVerify: true selector: matchLabels: diff --git a/infra/feast-operator/config/rbac/auth_proxy_role.yaml b/infra/feast-operator/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 55f87916462..00000000000 --- a/infra/feast-operator/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: feast-operator - app.kubernetes.io/managed-by: kustomize - name: proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create diff --git a/infra/feast-operator/config/rbac/kustomization.yaml b/infra/feast-operator/config/rbac/kustomization.yaml index 5e4972b5397..d22437a5390 100644 --- a/infra/feast-operator/config/rbac/kustomization.yaml +++ b/infra/feast-operator/config/rbac/kustomization.yaml @@ -9,13 +9,15 @@ resources: - role_binding.yaml - leader_election_role.yaml - leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml -- auth_proxy_client_clusterrole.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. 
+# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml # For each CRD, "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines diff --git a/infra/feast-operator/config/rbac/metrics_auth_role.yaml b/infra/feast-operator/config/rbac/metrics_auth_role.yaml new file mode 100644 index 00000000000..bee99788cf4 --- /dev/null +++ b/infra/feast-operator/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: feast-operator + app.kubernetes.io/managed-by: kustomize + name: metrics-auth-role +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/infra/feast-operator/config/rbac/auth_proxy_role_binding.yaml b/infra/feast-operator/config/rbac/metrics_auth_role_binding.yaml similarity index 84% rename from infra/feast-operator/config/rbac/auth_proxy_role_binding.yaml rename to infra/feast-operator/config/rbac/metrics_auth_role_binding.yaml index ffa85c82af6..f84b6c4160c 100644 --- a/infra/feast-operator/config/rbac/auth_proxy_role_binding.yaml +++ b/infra/feast-operator/config/rbac/metrics_auth_role_binding.yaml @@ -4,11 +4,11 @@ metadata: labels: app.kubernetes.io/name: feast-operator app.kubernetes.io/managed-by: kustomize - name: proxy-rolebinding + name: metrics-auth-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: metrics-auth-role subjects: - kind: ServiceAccount name: controller-manager diff --git a/infra/feast-operator/config/rbac/auth_proxy_client_clusterrole.yaml b/infra/feast-operator/config/rbac/metrics_reader_role.yaml similarity index 100% 
rename from infra/feast-operator/config/rbac/auth_proxy_client_clusterrole.yaml rename to infra/feast-operator/config/rbac/metrics_reader_role.yaml diff --git a/infra/feast-operator/config/rbac/role.yaml b/infra/feast-operator/config/rbac/role.yaml index 5ee64d47051..7fba75c23a4 100644 --- a/infra/feast-operator/config/rbac/role.yaml +++ b/infra/feast-operator/config/rbac/role.yaml @@ -19,6 +19,8 @@ rules: - "" resources: - configmaps + - persistentvolumeclaims + - serviceaccounts - services verbs: - create @@ -27,6 +29,13 @@ rules: - list - update - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list - apiGroups: - feast.dev resources: @@ -53,3 +62,26 @@ rules: - get - patch - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - update + - watch diff --git a/infra/feast-operator/config/samples/kustomization.yaml b/infra/feast-operator/config/samples/kustomization.yaml index 4869cc7b245..ecb2e09c95b 100644 --- a/infra/feast-operator/config/samples/kustomization.yaml +++ b/infra/feast-operator/config/samples/kustomization.yaml @@ -1,4 +1,5 @@ ## Append samples of your project ## resources: - v1alpha1_featurestore.yaml +- v1alpha1_featurestore_all_servers.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_all_servers.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_all_servers.yaml new file mode 100644 index 00000000000..0d9d744a678 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_all_servers.yaml @@ -0,0 +1,13 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-all-servers +spec: + feastProject: my_project + services: + offlineStore: + server: {} + registry: + local: + server: {} 
+ ui: {} diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_db_persistence.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_db_persistence.yaml new file mode 100644 index 00000000000..e66b7fc3283 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_db_persistence.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: Secret +metadata: + name: postgres-secret + namespace: test + labels: + app: postgres +stringData: + POSTGRES_DB: feast + POSTGRES_USER: feast + POSTGRES_PASSWORD: feast +--- +apiVersion: v1 +kind: Secret +metadata: + name: feast-data-stores + namespace: test +stringData: + sql: | + path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.test.svc.cluster.local:5432/${POSTGRES_DB} + cache_ttl_seconds: 60 + sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true + postgres: | + host: postgres.test.svc.cluster.local + port: 5432 + database: ${POSTGRES_DB} + db_schema: public + user: ${POSTGRES_USER} + password: ${POSTGRES_PASSWORD} +--- +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-db-persistence + namespace: test +spec: + feastProject: my_project + services: + onlineStore: + persistence: + store: + type: postgres + secretRef: + name: feast-data-stores + server: + envFrom: + - secretRef: + name: postgres-secret + registry: + local: + persistence: + store: + type: sql + secretRef: + name: feast-data-stores diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_git.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_git.yaml new file mode 100644 index 00000000000..7730ef88518 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_git.yaml @@ -0,0 +1,10 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-git +spec: + feastProject: credit_scoring_local + feastProjectDir: + git: + url: https://github.com/feast-dev/feast-credit-score-local-tutorial + ref: 598a270 diff --git 
a/infra/feast-operator/config/samples/v1alpha1_featurestore_git_repopath.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_git_repopath.yaml new file mode 100644 index 00000000000..6519e1bf429 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_git_repopath.yaml @@ -0,0 +1,11 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-git-repopath +spec: + feastProject: feast_demo_odfv + feastProjectDir: + git: + url: https://github.com/feast-dev/feast-workshop + ref: e959053 + featureRepoPath: module_2/feature_repo diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_git_token.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_git_token.yaml new file mode 100644 index 00000000000..f16f503c8fb --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_git_token.yaml @@ -0,0 +1,21 @@ +kind: Secret +apiVersion: v1 +metadata: + name: git-token +stringData: + TOKEN: xxxxxxxxxxx +--- +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-git-token +spec: + feastProject: private + feastProjectDir: + git: + configs: + 'url."https://api:${TOKEN}@github.com/".insteadOf': 'https://github.com/' + envFrom: + - secretRef: + name: git-token + url: 'https://github.com/user/private' diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_init.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_init.yaml new file mode 100644 index 00000000000..f2324eeab2d --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_init.yaml @@ -0,0 +1,9 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-init +spec: + feastProject: sample_init + feastProjectDir: + init: + template: spark diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml new file mode 100644 index 
00000000000..33225b2edfb --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_kubernetes_auth.yaml @@ -0,0 +1,20 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-kubernetes-auth +spec: + feastProject: feast_rbac + authz: + kubernetes: + roles: + - feast-writer + - feast-reader + services: + offlineStore: + server: {} + onlineStore: + server: {} + registry: + local: + server: {} + ui: {} diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_objectstore_persistence.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_objectstore_persistence.yaml new file mode 100644 index 00000000000..2146dabe85c --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_objectstore_persistence.yaml @@ -0,0 +1,16 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-s3-registry +spec: + feastProject: my_project + services: + registry: + local: + persistence: + file: + path: s3://bucket/registry.db + s3_additional_kwargs: + ServerSideEncryption: AES256 + ACL: bucket-owner-full-control + CacheControl: max-age=3600 diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_oidc_auth.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_oidc_auth.yaml new file mode 100644 index 00000000000..54660a5c232 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_oidc_auth.yaml @@ -0,0 +1,21 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-oidc-auth +spec: + feastProject: my_project + authz: + oidc: + secretRef: + name: oidc-secret +--- +kind: Secret +apiVersion: v1 +metadata: + name: oidc-secret +stringData: + client_id: client_id + auth_discovery_url: auth_discovery_url + client_secret: client_secret + username: username + password: password diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_db_volumes_tls.yaml 
b/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_db_volumes_tls.yaml new file mode 100644 index 00000000000..61add153716 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_db_volumes_tls.yaml @@ -0,0 +1,83 @@ +apiVersion: v1 +kind: Secret +metadata: + name: postgres-secret + labels: + app: postgres +stringData: + POSTGRES_DB: feast + POSTGRES_USER: admin + POSTGRES_PASSWORD: password + POSTGRES_HOST: postgresql.feast.svc.cluster.local +--- +apiVersion: v1 +kind: Secret +metadata: + name: feast-data-stores +stringData: + sql: | + path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:5432/${POSTGRES_DB}?sslmode=verify-full&sslrootcert=/var/lib/postgresql/certs/ca.crt&sslcert=/var/lib/postgresql/certs/tls.crt&sslkey=/var/lib/postgresql/certs/tls.key + cache_ttl_seconds: 60 + sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true + postgres: | + host: ${POSTGRES_HOST} + port: 5432 + database: ${POSTGRES_DB} + db_schema: public + user: ${POSTGRES_USER} + password: ${POSTGRES_PASSWORD} + sslmode: verify-full + sslkey_path: /var/lib/postgresql/certs/tls.key + sslcert_path: /var/lib/postgresql/certs/tls.crt + sslrootcert_path: /var/lib/postgresql/certs/ca.crt +--- +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-db-ssl +spec: + feastProject: postgres_tls_sample + services: + volumes: + - name: postgres-certs + secret: + secretName: postgresql-client-certs + items: + - key: ca.crt + path: ca.crt + mode: 0644 # Readable by all, required by PostgreSQL + - key: tls.crt + path: tls.crt + mode: 0644 # Required for the client certificate + - key: tls.key + path: tls.key + mode: 0640 # Required for the private key + offlineStore: + persistence: + store: + type: postgres + secretRef: + name: feast-data-stores + onlineStore: + persistence: + store: + type: postgres + secretRef: + name: feast-data-stores + server: + volumeMounts: + - name: postgres-certs + 
mountPath: /var/lib/postgresql/certs + readOnly: true + envFrom: + - secretRef: + name: postgres-secret + registry: + local: + persistence: + store: + type: sql + secretRef: + name: feast-data-stores diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml new file mode 100644 index 00000000000..42e1ae4b4a6 --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_postgres_tls_volumes_ca_env.yaml @@ -0,0 +1,84 @@ +apiVersion: v1 +kind: Secret +metadata: + name: postgres-secret + labels: + app: postgres +stringData: + POSTGRES_DB: feast + POSTGRES_USER: admin + POSTGRES_PASSWORD: password + POSTGRES_HOST: postgresql.feast.svc.cluster.local + FEAST_CA_CERT_FILE_PATH: /var/lib/postgresql/certs/ca.crt +--- +apiVersion: v1 +kind: Secret +metadata: + name: feast-data-stores +stringData: + sql: | + path: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:5432/${POSTGRES_DB}?sslmode=verify-full&sslrootcert=system&sslcert=/var/lib/postgresql/certs/tls.crt&sslkey=/var/lib/postgresql/certs/tls.key + cache_ttl_seconds: 60 + sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true + postgres: | + host: ${POSTGRES_HOST} + port: 5432 + database: ${POSTGRES_DB} + db_schema: public + user: ${POSTGRES_USER} + password: ${POSTGRES_PASSWORD} + sslmode: verify-full + sslkey_path: /var/lib/postgresql/certs/tls.key + sslcert_path: /var/lib/postgresql/certs/tls.crt + sslrootcert_path: system +--- +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-db-ssl +spec: + feastProject: postgres_tls_sample_env_ca + services: + volumes: + - name: postgres-certs + secret: + secretName: postgresql-client-certs + items: + - key: ca.crt + path: ca.crt + mode: 0644 # Readable by all, required by PostgreSQL + - key: tls.crt + path: tls.crt + mode: 0644 # Required for the client certificate + - 
key: tls.key + path: tls.key + mode: 0640 # Required for the private key + offlineStore: + persistence: + store: + type: postgres + secretRef: + name: feast-data-stores + onlineStore: + persistence: + store: + type: postgres + secretRef: + name: feast-data-stores + server: + volumeMounts: + - name: postgres-certs + mountPath: /var/lib/postgresql/certs + readOnly: true + envFrom: + - secretRef: + name: postgres-secret + registry: + local: + persistence: + store: + type: sql + secretRef: + name: feast-data-stores diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_pvc_persistence.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_pvc_persistence.yaml new file mode 100644 index 00000000000..15aa46c456c --- /dev/null +++ b/infra/feast-operator/config/samples/v1alpha1_featurestore_pvc_persistence.yaml @@ -0,0 +1,48 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-pvc-persistence +spec: + feastProject: my_project + services: + # demonstrates using a pre-existing PVC + onlineStore: + persistence: + file: + path: online_store.db + pvc: + ref: + name: online-pvc + mountPath: /data/online + # demonstrates specifying a storageClassName and storage size + offlineStore: + persistence: + file: + type: duckdb + pvc: + create: + storageClassName: standard + resources: + requests: + storage: 5Gi + mountPath: /data/offline + # demonstrates letting the Operator create a PVC w/ defaults set + registry: + local: + persistence: + file: + path: registry.db + pvc: + create: {} + mountPath: /data/registry +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: online-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi diff --git a/infra/feast-operator/config/samples/v1alpha1_featurestore_services_loglevel.yaml b/infra/feast-operator/config/samples/v1alpha1_featurestore_services_loglevel.yaml new file mode 100644 index 00000000000..e738e6352be --- /dev/null +++ 
b/infra/feast-operator/config/samples/v1alpha1_featurestore_services_loglevel.yaml @@ -0,0 +1,19 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: sample-services-loglevel +spec: + feastProject: my_project + services: + onlineStore: + server: + logLevel: debug + offlineStore: + server: + logLevel: debug + registry: + local: + server: + logLevel: debug + ui: + logLevel: debug diff --git a/infra/feast-operator/dist/install.yaml b/infra/feast-operator/dist/install.yaml index 4d66fbdc734..df7be4ffb0a 100644 --- a/infra/feast-operator/dist/install.yaml +++ b/infra/feast-operator/dist/install.yaml @@ -11,7 +11,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: featurestores.feast.dev spec: group: feast.dev @@ -37,39 +37,79 @@ spec: description: FeatureStore is the Schema for the featurestores API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: APIVersion defines the versioned schema of this representation + of an object. type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: Kind is a string value representing the REST resource this + object represents. 
type: string metadata: type: object spec: description: FeatureStoreSpec defines the desired state of FeatureStore properties: + authz: + description: AuthzConfig defines the authorization settings for the + deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed in the + same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, c)' feastProject: - description: FeastProject is the Feast project id. This can be any - alphanumeric string with underscores, but it cannot start with an - underscore. Required. + description: FeastProject is the Feast project id. pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ type: string - services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. 
properties: - offlineStore: - description: OfflineStore configures the deployed offline store - service + git: + description: GitCloneOptions describes how a clone should be performed. properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". + type: object env: items: description: EnvVar represents an environment variable present @@ -83,13 +123,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's value. @@ -102,10 +136,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap or @@ -116,9 +151,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' 
properties: apiVersion: description: Version of the schema the FieldPath @@ -135,7 +170,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for volumes, @@ -165,10 +200,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret or its @@ -183,256 +219,268 @@ spec: - name type: object type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image - type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: name: + default: "" description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. 
+ Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - required: - - name + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - onlineStore: - description: OnlineStore configures the deployed online store - service - properties: - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. 
If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + secretRef: + description: The Secret to select from properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. 
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: + name: + default: "" description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean type: object - required: - - name + x-kubernetes-map-type: atomic type: object type: array - image: + featureRepoPath: + description: FeatureRepoPath is the relative path to the feature + repo subdirectory. Default is 'feature_repo'. 
type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to - pull a container image + ref: + description: Reference to a branch / tag / commit type: string - resources: - description: ResourceRequirements describes the compute resource - requirements. + url: + description: The repository URL to clone from. + type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? !self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast init`. + properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
- properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true + maxSurge: + anyOf: + - type: integer + - type: string description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be unavailable + during the update. + x-kubernetes-int-or-string: true type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string type: object - registry: - description: Registry configures the registry service. One selection - is required. 
Local is the default setting. + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service properties: - local: - description: LocalRegistryConfig configures the deployed registry - service + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures the + file-based persistence for the offline store service + properties: + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. 
+ type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. 
+ rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a remote offline server container properties: env: items: @@ -447,13 +495,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's @@ -466,10 +508,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap @@ -480,9 +523,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' 
properties: apiVersion: description: Version of the schema the FieldPath @@ -499,7 +542,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for @@ -530,10 +573,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret @@ -548,12 +592,66 @@ spec: - name type: object type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array image: type: string imagePullPolicy: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -562,13 +660,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -576,8 +667,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. type: string required: - name @@ -595,7 +685,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. type: object requests: additionalProperties: @@ -604,69 +694,248 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. 
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service properties: - feastRef: - description: Reference to an existing `FeatureStore` CR - in the same k8s cluster. + file: + description: OnlineStoreFilePersistence configures the + file-based persistence for the online store service properties: - name: - description: Name of the FeatureStore + path: type: string - namespace: - description: Namespace of the FeatureStore + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. 
+ items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. 
+ rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with no + slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus type: string required: - - name + - secretRef + - type type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string type: object x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' - type: object - x-kubernetes-validations: - - message: One selection required. 
- rule: '[has(self.local), has(self.remote)].exists_one(c, c)' - type: object - required: - - feastProject - type: object - status: - description: FeatureStoreStatus defines the observed state of FeatureStore - properties: - applied: - description: Shows the currently applied feast configuration, including - any pertinent defaults - properties: - feastProject: - description: FeastProject is the Feast project id. This can be - any alphanumeric string with underscores, but it cannot start - with an underscore. Required. - pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ - type: string - services: - description: FeatureStoreServices defines the desired feast service - deployments. ephemeral registry is deployed by default. - properties: - offlineStore: - description: OfflineStore configures the deployed offline - store service + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a feature server container properties: env: items: @@ -681,13 +950,7 @@ spec: description: |- Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + any type: string valueFrom: description: Source for the environment variable's @@ -700,10 +963,11 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the ConfigMap @@ -714,9 +978,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' properties: apiVersion: description: Version of the schema the FieldPath @@ -733,7 +997,7 @@ spec: resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + (limits.cpu, limits.memory, limits. properties: containerName: description: 'Container name: required for @@ -764,10 +1028,11 @@ spec: from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string optional: description: Specify whether the Secret @@ -782,12 +1047,66 @@ spec: - name type: object type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array image: type: string imagePullPolicy: description: PullPolicy describes a policy for if/when to pull a container image type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string resources: description: ResourceRequirements describes the compute resource requirements. @@ -796,13 +1115,6 @@ spec: description: |- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -810,8 +1122,7 @@ spec: name: description: |- Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + the Pod where this field is used. type: string required: - name @@ -829,7 +1140,7 @@ spec: x-kubernetes-int-or-string: true description: |- Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + More info: https://kubernetes. type: object requests: additionalProperties: @@ -838,417 +1149,5810 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + description: Requests describes the minimum amount + of compute resources required. type: object type: object - type: object - onlineStore: - description: OnlineStore configures the deployed online store - service - properties: - env: + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? 
has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. items: - description: EnvVar represents an environment variable - present in a Container. + description: VolumeMount describes a mounting of a Volume + within a container. properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: This must match the Name of a Volume. type: string - value: + readOnly: description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. 
+ type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One selection + is required. Local is the default setting. + properties: + local: + description: LocalRegistryConfig configures the registry service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures the + file-based persistence for the registry service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. 
+ type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object store + URIs. + rule: '(!has(self.pvc) && has(self.path)) ? (self.path.startsWith(''/'') + || self.path.startsWith(''s3://'') || self.path.startsWith(''gs://'')) + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 or + GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available only + for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? 
self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
+ type: string + value: description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - required: - - resource + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean type: object x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
type: string optional: description: Specify whether the Secret - or its key must be defined + must be defined type: boolean - required: - - key type: object x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? 
has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. + description: VolumeMount describes a mounting of + a Volume within a container. properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string name: + description: This must match the Name of a Volume. + type: string + readOnly: description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. type: string required: + - mountPath - name type: object type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object type: object type: object - registry: - description: Registry configures the registry service. One - selection is required. Local is the default setting. + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. properties: - local: - description: LocalRegistryConfig configures the deployed - registry service + feastRef: + description: Reference to an existing `FeatureStore` CR + in the same k8s cluster. properties: - env: - items: - description: EnvVar represents an environment variable - present in a Container. + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for the + client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap where + the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount of + compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. useful + in an openshift cluster, for example, where TLS is configured + by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key names + for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where the + TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes that + should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the FeatureStore + deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is + the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is + empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default.' 
+ format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver that + handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone PVC + to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may be + used to set the VolumeAttributesClass used + by this claim. + type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. 
+ type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide identifiers + (wwids)\nEither wwids or combination of targetWWNs + and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. 
+ type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. 
+ type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. 
+ type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set + permissions on created files by default. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod to + access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. 
+ Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default.' 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. 
+ type: string + volumeNamespace: + description: volumeNamespace specifies the scope of + the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + status: + description: FeatureStoreStatus defines the observed state of FeatureStore + properties: + applied: + description: Shows the currently applied feast configuration, including + any pertinent defaults + properties: + authz: + description: AuthzConfig defines the authorization settings for + the deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed + in the same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, + c)' + feastProject: + description: FeastProject is the Feast project id. + pattern: ^[A-Za-z0-9][A-Za-z0-9_]*$ + type: string + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. + properties: + git: + description: GitCloneOptions describes how a clone should + be performed. + properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". + type: object + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + featureRepoPath: + description: FeatureRepoPath is the relative path to the + feature repo subdirectory. Default is 'feature_repo'. + type: string + ref: + description: Reference to a branch / tag / commit + type: string + url: + description: The repository URL to clone from. + type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? !self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast + init`. 
+ properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be + unavailable during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or + "RollingUpdate". Default is RollingUpdate. + type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service + properties: + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures + the file-based persistence for the offline store + service + properties: + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. 
+ properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. 
+ properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' 
+ rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures + the file-based persistence for the online store + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. 
Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. 
+ rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS + buckets. + rule: 'has(self.path) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. 
+ properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. 
+ type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. 
+ type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One + selection is required. Local is the default setting. + properties: + local: + description: LocalRegistryConfig configures the registry + service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures + the file-based persistence for the registry + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings + for a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new + PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to + ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the + storage resource requirements for + a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. 
+ type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes + the minimum amount of compute + resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the + name of an existing StorageClass + to which this persistent volume + belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing + field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between + ref and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' + and must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object + store URIs. + rule: '(!has(self.pvc) && has(self.path)) ? + (self.path.startsWith(''/'') || self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: PVC path must be a file name only, + with no slashes. + rule: '(has(self.pvc) && has(self.path)) ? 
!self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 + or GS object store URIs. + rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available + only for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store + "type" is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should + be placed as-is from the "feature_store.yaml" + under the secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type + you want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or + store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS + for a feast service. + properties: + disable: + description: will disable TLS for the feast + service. useful in an openshift cluster, + for example, where TLS is configured by + default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret + where the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` + is false.' + rule: '(!has(self.disable) || !self.disable) + ? has(self.secretRef) : true' + volumeMounts: + description: VolumeMounts defines the list of + volumes that should be mounted into the feast + container. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of + a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. + properties: + feastRef: + description: Reference to an existing `FeatureStore` + CR in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. 
`registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for + the client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap + where the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, + c)' + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' 
+ rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the + FeatureStore deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. 
+ If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). 
ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone + PVC to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. 
+ type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. 
+ type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may + be used to set the VolumeAttributesClass + used by this claim. + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide + identifiers (wwids)\nEither wwids or combination + of targetWWNs and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. 
+ type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. 
+ Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. 
+ type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod + to access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
+ type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. 
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. 
+ type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: + default: "" description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. 
- type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, - defaults to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults - to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to - select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. 
- type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when - to pull a container image - type: string - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - remote: - description: |- - RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. - Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. - properties: - feastRef: - description: Reference to an existing `FeatureStore` - CR in the same k8s cluster. - properties: - name: - description: Name of the FeatureStore - type: string - namespace: - description: Namespace of the FeatureStore - type: string - required: - - name - type: object - hostname: - description: Host address of the remote registry service - - :, e.g. `registry..svc.cluster.local:80` - type: string - type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, - c)' - type: object - x-kubernetes-validations: - - message: One selection required. - rule: '[has(self.local), has(self.remote)].exists_one(c, - c)' + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. 
+ type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array type: object required: - feastProject @@ -1259,21 +6963,12 @@ spec: type: string conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. 
- This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. format: date-time type: string message: @@ -1285,18 +6980,13 @@ spec: observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + For instance, if . format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -1312,9 +7002,7 @@ spec: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + Many .condition. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1327,7 +7015,6 @@ spec: type: object type: array feastVersion: - description: Version of feast that's currently deployed type: string phase: type: string @@ -1341,6 +7028,8 @@ spec: type: string registry: type: string + ui: + type: string type: object type: object type: object @@ -1469,6 +7158,8 @@ rules: - "" resources: - configmaps + - persistentvolumeclaims + - serviceaccounts - services verbs: - create @@ -1477,6 +7168,13 @@ rules: - list - update - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list - apiGroups: - feast.dev resources: @@ -1503,19 +7201,29 @@ rules: - get - patch - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: feast-operator - name: feast-operator-metrics-reader -rules: -- nonResourceURLs: - - /metrics +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes verbs: + - create + - delete - get + - list + - update + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -1523,7 +7231,7 @@ metadata: labels: app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: feast-operator - name: feast-operator-proxy-role + name: feast-operator-metrics-auth-role rules: - apiGroups: - authentication.k8s.io @@ -1539,6 +7247,19 @@ rules: - create --- apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: feast-operator + name: feast-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: @@ 
-1577,11 +7298,11 @@ metadata: labels: app.kubernetes.io/managed-by: kustomize app.kubernetes.io/name: feast-operator - name: feast-operator-proxy-rolebinding + name: feast-operator-metrics-auth-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: feast-operator-proxy-role + name: feast-operator-metrics-auth-role subjects: - kind: ServiceAccount name: feast-operator-controller-manager @@ -1601,7 +7322,7 @@ spec: - name: https port: 8443 protocol: TCP - targetPort: https + targetPort: 8443 selector: control-plane: controller-manager --- @@ -1628,35 +7349,15 @@ spec: spec: containers: - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --metrics-bind-address=:8443 - --leader-elect + - --health-probe-bind-address=:8081 command: - /manager - image: feastdev/feast-operator:0.41.0 + env: + - name: RELATED_IMAGE_FEATURE_SERVER + value: docker.io/feastdev/feature-server:0.46.0 + image: feastdev/feast-operator:0.46.0 livenessProbe: httpGet: path: /healthz diff --git a/infra/feast-operator/docs/api/markdown/ref.md b/infra/feast-operator/docs/api/markdown/ref.md new file mode 100644 index 00000000000..aefb5abca76 --- /dev/null +++ b/infra/feast-operator/docs/api/markdown/ref.md @@ -0,0 +1,605 @@ +# API Reference + +## Packages +- [feast.dev/v1alpha1](#feastdevv1alpha1) + + +## feast.dev/v1alpha1 + +Package v1alpha1 contains API Schema definitions for the v1alpha1 API group + +### Resource Types +- [FeatureStore](#featurestore) + + + +#### AuthzConfig + + + +AuthzConfig defines the 
authorization settings for the deployed Feast services. + +_Appears in:_ +- [FeatureStoreSpec](#featurestorespec) + +| Field | Description | +| --- | --- | +| `kubernetes` _[KubernetesAuthz](#kubernetesauthz)_ | | +| `oidc` _[OidcAuthz](#oidcauthz)_ | | + + +#### ContainerConfigs + + + +ContainerConfigs k8s container settings for the server + +_Appears in:_ +- [ServerConfigs](#serverconfigs) + +| Field | Description | +| --- | --- | +| `image` _string_ | | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envvar-v1-core)_ | | +| `envFrom` _[EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envfromsource-v1-core)_ | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#pullpolicy-v1-core)_ | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | | + + +#### DefaultCtrConfigs + + + +DefaultCtrConfigs k8s container settings that are applied by default + +_Appears in:_ +- [ContainerConfigs](#containerconfigs) +- [ServerConfigs](#serverconfigs) + +| Field | Description | +| --- | --- | +| `image` _string_ | | + + +#### FeastInitOptions + + + +FeastInitOptions defines how to run a `feast init`. + +_Appears in:_ +- [FeastProjectDir](#feastprojectdir) + +| Field | Description | +| --- | --- | +| `minimal` _boolean_ | | +| `template` _string_ | Template for the created project | + + +#### FeastProjectDir + + + +FeastProjectDir defines how to create the feast project directory. 
+ +_Appears in:_ +- [FeatureStoreSpec](#featurestorespec) + +| Field | Description | +| --- | --- | +| `git` _[GitCloneOptions](#gitcloneoptions)_ | | +| `init` _[FeastInitOptions](#feastinitoptions)_ | | + + +#### FeatureStore + + + +FeatureStore is the Schema for the featurestores API + + + +| Field | Description | +| --- | --- | +| `apiVersion` _string_ | `feast.dev/v1alpha1` +| `kind` _string_ | `FeatureStore` +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `spec` _[FeatureStoreSpec](#featurestorespec)_ | | +| `status` _[FeatureStoreStatus](#featurestorestatus)_ | | + + +#### FeatureStoreRef + + + +FeatureStoreRef defines which existing FeatureStore's registry should be used + +_Appears in:_ +- [RemoteRegistryConfig](#remoteregistryconfig) + +| Field | Description | +| --- | --- | +| `name` _string_ | Name of the FeatureStore | +| `namespace` _string_ | Namespace of the FeatureStore | + + +#### FeatureStoreServices + + + +FeatureStoreServices defines the desired feast services. An ephemeral onlineStore feature server is deployed by default. + +_Appears in:_ +- [FeatureStoreSpec](#featurestorespec) + +| Field | Description | +| --- | --- | +| `offlineStore` _[OfflineStore](#offlinestore)_ | | +| `onlineStore` _[OnlineStore](#onlinestore)_ | | +| `registry` _[Registry](#registry)_ | | +| `ui` _[ServerConfigs](#serverconfigs)_ | Creates a UI server container | +| `deploymentStrategy` _[DeploymentStrategy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstrategy-v1-apps)_ | | +| `disableInitContainers` _boolean_ | Disable the 'feast repo initialization' initContainer | +| `volumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volume-v1-core) array_ | Volumes specifies the volumes to mount in the FeatureStore deployment. 
A corresponding `VolumeMount` should be added to whichever feast service(s) require access to said volume(s). | + + +#### FeatureStoreSpec + + + +FeatureStoreSpec defines the desired state of FeatureStore + +_Appears in:_ +- [FeatureStore](#featurestore) +- [FeatureStoreStatus](#featurestorestatus) + +| Field | Description | +| --- | --- | +| `feastProject` _string_ | FeastProject is the Feast project id. This can be any alphanumeric string with underscores, but it cannot start with an underscore. Required. | +| `feastProjectDir` _[FeastProjectDir](#feastprojectdir)_ | | +| `services` _[FeatureStoreServices](#featurestoreservices)_ | | +| `authz` _[AuthzConfig](#authzconfig)_ | | + + +#### FeatureStoreStatus + + + +FeatureStoreStatus defines the observed state of FeatureStore + +_Appears in:_ +- [FeatureStore](#featurestore) + +| Field | Description | +| --- | --- | +| `applied` _[FeatureStoreSpec](#featurestorespec)_ | Shows the currently applied feast configuration, including any pertinent defaults | +| `clientConfigMap` _string_ | ConfigMap in this namespace containing a client `feature_store.yaml` for this feast deployment | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | +| `feastVersion` _string_ | | +| `phase` _string_ | | +| `serviceHostnames` _[ServiceHostnames](#servicehostnames)_ | | + + +#### GitCloneOptions + + + +GitCloneOptions describes how a clone should be performed. + +_Appears in:_ +- [FeastProjectDir](#feastprojectdir) + +| Field | Description | +| --- | --- | +| `url` _string_ | The repository URL to clone from. | +| `ref` _string_ | Reference to a branch / tag / commit | +| `configs` _object (keys:string, values:string)_ | Configs passed to git via `-c` +e.g. http.sslVerify: 'false' +OR 'url."https://api:\${TOKEN}@github.com/".insteadOf': 'https://github.com/' | +| `featureRepoPath` _string_ | FeatureRepoPath is the relative path to the feature repo subdirectory. 
Default is 'feature_repo'. | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envvar-v1-core)_ | | +| `envFrom` _[EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envfromsource-v1-core)_ | | + + +#### KubernetesAuthz + + + +KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. +https://kubernetes.io/docs/reference/access-authn-authz/rbac/ + +_Appears in:_ +- [AuthzConfig](#authzconfig) + +| Field | Description | +| --- | --- | +| `roles` _string array_ | The Kubernetes RBAC roles to be deployed in the same namespace of the FeatureStore. +Roles are managed by the operator and created with an empty list of rules. +See the Feast permission model at https://docs.feast.dev/getting-started/concepts/permission +The feature store admin is not obligated to manage roles using the Feast operator, roles can be managed independently. +This configuration option is only providing a way to automate this procedure. +Important note: the operator cannot ensure that these roles will match the ones used in the configured Feast permissions. 
| + + +#### LocalRegistryConfig + + + +LocalRegistryConfig configures the registry service + +_Appears in:_ +- [Registry](#registry) + +| Field | Description | +| --- | --- | +| `server` _[ServerConfigs](#serverconfigs)_ | Creates a registry server container | +| `persistence` _[RegistryPersistence](#registrypersistence)_ | | + + +#### OfflineStore + + + +OfflineStore configures the offline store service + +_Appears in:_ +- [FeatureStoreServices](#featurestoreservices) + +| Field | Description | +| --- | --- | +| `server` _[ServerConfigs](#serverconfigs)_ | Creates a remote offline server container | +| `persistence` _[OfflineStorePersistence](#offlinestorepersistence)_ | | + + +#### OfflineStoreDBStorePersistence + + + +OfflineStoreDBStorePersistence configures the DB store persistence for the offline store service + +_Appears in:_ +- [OfflineStorePersistence](#offlinestorepersistence) + +| Field | Description | +| --- | --- | +| `type` _string_ | Type of the persistence type you want to use. | +| `secretRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. 
| +| `secretKeyName` _string_ | By default, the selected store "type" is used as the SecretKeyName | + + +#### OfflineStoreFilePersistence + + + +OfflineStoreFilePersistence configures the file-based persistence for the offline store service + +_Appears in:_ +- [OfflineStorePersistence](#offlinestorepersistence) + +| Field | Description | +| --- | --- | +| `type` _string_ | | +| `pvc` _[PvcConfig](#pvcconfig)_ | | + + +#### OfflineStorePersistence + + + +OfflineStorePersistence configures the persistence settings for the offline store service + +_Appears in:_ +- [OfflineStore](#offlinestore) + +| Field | Description | +| --- | --- | +| `file` _[OfflineStoreFilePersistence](#offlinestorefilepersistence)_ | | +| `store` _[OfflineStoreDBStorePersistence](#offlinestoredbstorepersistence)_ | | + + +#### OidcAuthz + + + +OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. +https://auth0.com/docs/authenticate/protocols/openid-connect-protocol + +_Appears in:_ +- [AuthzConfig](#authzconfig) + +| Field | Description | +| --- | --- | +| `secretRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | | + + +#### OnlineStore + + + +OnlineStore configures the online store service + +_Appears in:_ +- [FeatureStoreServices](#featurestoreservices) + +| Field | Description | +| --- | --- | +| `server` _[ServerConfigs](#serverconfigs)_ | Creates a feature server container | +| `persistence` _[OnlineStorePersistence](#onlinestorepersistence)_ | | + + +#### OnlineStoreDBStorePersistence + + + +OnlineStoreDBStorePersistence configures the DB store persistence for the online store service + +_Appears in:_ +- [OnlineStorePersistence](#onlinestorepersistence) + +| Field | Description | +| --- | --- | +| `type` _string_ | Type of the persistence type you want to use. 
| +| `secretRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. | +| `secretKeyName` _string_ | By default, the selected store "type" is used as the SecretKeyName | + + +#### OnlineStoreFilePersistence + + + +OnlineStoreFilePersistence configures the file-based persistence for the online store service + +_Appears in:_ +- [OnlineStorePersistence](#onlinestorepersistence) + +| Field | Description | +| --- | --- | +| `path` _string_ | | +| `pvc` _[PvcConfig](#pvcconfig)_ | | + + +#### OnlineStorePersistence + + + +OnlineStorePersistence configures the persistence settings for the online store service + +_Appears in:_ +- [OnlineStore](#onlinestore) + +| Field | Description | +| --- | --- | +| `file` _[OnlineStoreFilePersistence](#onlinestorefilepersistence)_ | | +| `store` _[OnlineStoreDBStorePersistence](#onlinestoredbstorepersistence)_ | | + + +#### OptionalCtrConfigs + + + +OptionalCtrConfigs k8s container settings that are optional + +_Appears in:_ +- [ContainerConfigs](#containerconfigs) +- [ServerConfigs](#serverconfigs) + +| Field | Description | +| --- | --- | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envvar-v1-core)_ | | +| `envFrom` _[EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envfromsource-v1-core)_ | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#pullpolicy-v1-core)_ | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | | + + +#### PvcConfig + + + +PvcConfig defines the settings for a persistent file store based on PVCs. 
+We can refer to an existing PVC using the `Ref` field, or create a new one using the `Create` field. + +_Appears in:_ +- [OfflineStoreFilePersistence](#offlinestorefilepersistence) +- [OnlineStoreFilePersistence](#onlinestorefilepersistence) +- [RegistryFilePersistence](#registryfilepersistence) + +| Field | Description | +| --- | --- | +| `ref` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | Reference to an existing field | +| `create` _[PvcCreate](#pvccreate)_ | Settings for creating a new PVC | +| `mountPath` _string_ | MountPath within the container at which the volume should be mounted. +Must start by "/" and cannot contain ':'. | + + +#### PvcCreate + + + +PvcCreate defines the immutable settings to create a new PVC mounted at the given path. +The PVC name is the same as the associated deployment & feast service name. + +_Appears in:_ +- [PvcConfig](#pvcconfig) + +| Field | Description | +| --- | --- | +| `accessModes` _[PersistentVolumeAccessMode](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#persistentvolumeaccessmode-v1-core) array_ | AccessModes k8s persistent volume access modes. Defaults to ["ReadWriteOnce"]. | +| `storageClassName` _string_ | StorageClassName is the name of an existing StorageClass to which this persistent volume belongs. Empty value +means that this volume does not belong to any StorageClass and the cluster default will be used. | +| `resources` _[VolumeResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volumeresourcerequirements-v1-core)_ | Resources describes the storage resource requirements for a volume. +Default requested storage size depends on the associated service: +- 10Gi for offline store +- 5Gi for online store +- 5Gi for registry | + + +#### Registry + + + +Registry configures the registry service. One selection is required. Local is the default setting. 
+ +_Appears in:_ +- [FeatureStoreServices](#featurestoreservices) + +| Field | Description | +| --- | --- | +| `local` _[LocalRegistryConfig](#localregistryconfig)_ | | +| `remote` _[RemoteRegistryConfig](#remoteregistryconfig)_ | | + + +#### RegistryDBStorePersistence + + + +RegistryDBStorePersistence configures the DB store persistence for the registry service + +_Appears in:_ +- [RegistryPersistence](#registrypersistence) + +| Field | Description | +| --- | --- | +| `type` _string_ | Type of the persistence type you want to use. | +| `secretRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. | +| `secretKeyName` _string_ | By default, the selected store "type" is used as the SecretKeyName | + + +#### RegistryFilePersistence + + + +RegistryFilePersistence configures the file-based persistence for the registry service + +_Appears in:_ +- [RegistryPersistence](#registrypersistence) + +| Field | Description | +| --- | --- | +| `path` _string_ | | +| `pvc` _[PvcConfig](#pvcconfig)_ | | +| `s3_additional_kwargs` _map[string]string_ | | + + +#### RegistryPersistence + + + +RegistryPersistence configures the persistence settings for the registry service + +_Appears in:_ +- [LocalRegistryConfig](#localregistryconfig) + +| Field | Description | +| --- | --- | +| `file` _[RegistryFilePersistence](#registryfilepersistence)_ | | +| `store` _[RegistryDBStorePersistence](#registrydbstorepersistence)_ | | + + +#### RemoteRegistryConfig + + + +RemoteRegistryConfig points to a remote feast registry server. When set, the operator will not deploy a registry for this FeatureStore CR. +Instead, this FeatureStore CR's online/offline services will use a remote registry. One selection is required. 
+ +_Appears in:_ +- [Registry](#registry) + +| Field | Description | +| --- | --- | +| `hostname` _string_ | Host address of the remote registry service - :, e.g. `registry..svc.cluster.local:80` | +| `feastRef` _[FeatureStoreRef](#featurestoreref)_ | Reference to an existing `FeatureStore` CR in the same k8s cluster. | +| `tls` _[TlsRemoteRegistryConfigs](#tlsremoteregistryconfigs)_ | | + + +#### SecretKeyNames + + + +SecretKeyNames defines the secret key names for the TLS key and cert. + +_Appears in:_ +- [TlsConfigs](#tlsconfigs) + +| Field | Description | +| --- | --- | +| `tlsCrt` _string_ | defaults to "tls.crt" | +| `tlsKey` _string_ | defaults to "tls.key" | + + +#### ServerConfigs + + + +ServerConfigs creates a server for the feast service, with specified container configurations. + +_Appears in:_ +- [FeatureStoreServices](#featurestoreservices) +- [LocalRegistryConfig](#localregistryconfig) +- [OfflineStore](#offlinestore) +- [OnlineStore](#onlinestore) + +| Field | Description | +| --- | --- | +| `image` _string_ | | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envvar-v1-core)_ | | +| `envFrom` _[EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#envfromsource-v1-core)_ | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#pullpolicy-v1-core)_ | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | | +| `tls` _[TlsConfigs](#tlsconfigs)_ | | +| `logLevel` _string_ | LogLevel sets the logging level for the server +Allowed values: "debug", "info", "warning", "error", "critical". | +| `volumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#volumemount-v1-core) array_ | VolumeMounts defines the list of volumes that should be mounted into the feast container. 
+This allows attaching persistent storage, config files, secrets, or other resources +required by the Feast components. Ensure that each volume mount has a corresponding +volume definition in the Volumes field. | + + +#### ServiceHostnames + + + +ServiceHostnames defines the service hostnames in the format of :, e.g. example.svc.cluster.local:80 + +_Appears in:_ +- [FeatureStoreStatus](#featurestorestatus) + +| Field | Description | +| --- | --- | +| `offlineStore` _string_ | | +| `onlineStore` _string_ | | +| `registry` _string_ | | +| `ui` _string_ | | + + +#### TlsConfigs + + + +TlsConfigs configures server TLS for a feast service. in an openshift cluster, this is configured by default using service serving certificates. + +_Appears in:_ +- [ServerConfigs](#serverconfigs) + +| Field | Description | +| --- | --- | +| `secretRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | references the local k8s secret where the TLS key and cert reside | +| `secretKeyNames` _[SecretKeyNames](#secretkeynames)_ | | +| `disable` _boolean_ | will disable TLS for the feast service. useful in an openshift cluster, for example, where TLS is configured by default | + + +#### TlsRemoteRegistryConfigs + + + +TlsRemoteRegistryConfigs configures client TLS for a remote feast registry. in an openshift cluster, this is configured by default when the remote feast registry is using service serving certificates. + +_Appears in:_ +- [RemoteRegistryConfig](#remoteregistryconfig) + +| Field | Description | +| --- | --- | +| `configMapRef` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core)_ | references the local k8s configmap where the TLS cert resides | +| `certName` _string_ | defines the configmap key name for the client TLS cert. 
| + + diff --git a/infra/feast-operator/docs/crd-ref-templates/config.yaml b/infra/feast-operator/docs/crd-ref-templates/config.yaml new file mode 100644 index 00000000000..42d10e08b03 --- /dev/null +++ b/infra/feast-operator/docs/crd-ref-templates/config.yaml @@ -0,0 +1,8 @@ +processor: + ignoreTypes: + - "(FeatureStore)List$" + ignoreFields: + - "TypeMeta$" + +render: + kubernetesVersion: "1.30" \ No newline at end of file diff --git a/infra/feast-operator/docs/crd-ref-templates/markdown/gv_details.tpl b/infra/feast-operator/docs/crd-ref-templates/markdown/gv_details.tpl new file mode 100644 index 00000000000..30ad0d75184 --- /dev/null +++ b/infra/feast-operator/docs/crd-ref-templates/markdown/gv_details.tpl @@ -0,0 +1,19 @@ +{{- define "gvDetails" -}} +{{- $gv := . -}} + +## {{ $gv.GroupVersionString }} + +{{ $gv.Doc }} + +{{- if $gv.Kinds }} +### Resource Types +{{- range $gv.SortedKinds }} +- {{ $gv.TypeForKind . | markdownRenderTypeLink }} +{{- end }} +{{ end }} + +{{ range $gv.SortedTypes }} +{{ template "type" . }} +{{ end }} + +{{- end -}} diff --git a/infra/feast-operator/docs/crd-ref-templates/markdown/gv_list.tpl b/infra/feast-operator/docs/crd-ref-templates/markdown/gv_list.tpl new file mode 100644 index 00000000000..a4d3dadf18c --- /dev/null +++ b/infra/feast-operator/docs/crd-ref-templates/markdown/gv_list.tpl @@ -0,0 +1,15 @@ +{{- define "gvList" -}} +{{- $groupVersions := . -}} + +# API Reference + +## Packages +{{- range $groupVersions }} +- {{ markdownRenderGVLink . }} +{{- end }} + +{{ range $groupVersions }} +{{ template "gvDetails" . }} +{{ end }} + +{{- end -}} diff --git a/infra/feast-operator/docs/crd-ref-templates/markdown/type.tpl b/infra/feast-operator/docs/crd-ref-templates/markdown/type.tpl new file mode 100644 index 00000000000..c0ac2e03539 --- /dev/null +++ b/infra/feast-operator/docs/crd-ref-templates/markdown/type.tpl @@ -0,0 +1,33 @@ +{{- define "type" -}} +{{- $type := . 
-}} +{{- if markdownShouldRenderType $type -}} + +#### {{ $type.Name }} + +{{ if $type.IsAlias }}_Underlying type:_ `{{ markdownRenderTypeLink $type.UnderlyingType }}`{{ end }} + +{{ $type.Doc }} + +{{ if $type.References -}} +_Appears in:_ +{{- range $type.SortedReferences }} +- {{ markdownRenderTypeLink . }} +{{- end }} +{{- end }} + +{{ if $type.Members -}} +| Field | Description | +| --- | --- | +{{ if $type.GVK -}} +| `apiVersion` _string_ | `{{ $type.GVK.Group }}/{{ $type.GVK.Version }}` +| `kind` _string_ | `{{ $type.GVK.Kind }}` +{{ end -}} + +{{ range $type.Members -}} +| `{{ .Name }}` _{{ markdownRenderType .Type }}_ | {{ template "type_members" . }} | +{{ end -}} + +{{ end -}} + +{{- end -}} +{{- end -}} diff --git a/infra/feast-operator/docs/crd-ref-templates/markdown/type_members.tpl b/infra/feast-operator/docs/crd-ref-templates/markdown/type_members.tpl new file mode 100644 index 00000000000..182fa182166 --- /dev/null +++ b/infra/feast-operator/docs/crd-ref-templates/markdown/type_members.tpl @@ -0,0 +1,8 @@ +{{- define "type_members" -}} +{{- $field := . -}} +{{- if eq $field.Name "metadata" -}} +Refer to Kubernetes API documentation for fields of `metadata`. 
+{{- else -}} +{{ $field.Doc }} +{{- end -}} +{{- end -}} diff --git a/infra/feast-operator/go.mod b/infra/feast-operator/go.mod index 65d2aaac502..c8608cb242f 100644 --- a/infra/feast-operator/go.mod +++ b/infra/feast-operator/go.mod @@ -1,23 +1,31 @@ module github.com/feast-dev/feast/infra/feast-operator -go 1.21 +go 1.22.9 require ( - github.com/onsi/ginkgo/v2 v2.14.0 - github.com/onsi/gomega v1.30.0 - k8s.io/apimachinery v0.29.2 - k8s.io/client-go v0.29.2 - sigs.k8s.io/controller-runtime v0.17.3 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 + github.com/openshift/api v0.0.0-20240912201240-0a8800162826 // release-4.17 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.30.1 + k8s.io/apimachinery v0.30.1 + k8s.io/client-go v0.30.1 + sigs.k8s.io/controller-runtime v0.18.4 ) require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -25,12 +33,14 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.17.8 
// indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -45,28 +55,41 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf 
v1.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.2 // indirect - k8s.io/apiextensions-apiserver v0.29.2 // indirect - k8s.io/component-base v0.29.2 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiserver v0.30.1 // indirect + k8s.io/component-base v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/infra/feast-operator/go.sum b/infra/feast-operator/go.sum index be475e11018..ef5d6204916 100644 --- a/infra/feast-operator/go.sum +++ b/infra/feast-operator/go.sum @@ -1,5 +1,11 @@ +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -13,13 +19,17 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -32,15 +42,17 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod 
h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -51,6 +63,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -78,10 +92,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod 
h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/openshift/api v0.0.0-20240912201240-0a8800162826 h1:A8D9SN/hJUwAbdO0rPCVTqmuBOctdgurr53gK701SYo= +github.com/openshift/api v0.0.0-20240912201240-0a8800162826/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -98,10 +114,13 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -110,6 +129,22 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 
h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -128,34 +163,36 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 
h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -164,39 +201,50 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d 
h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod 
h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi 
v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= -sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/infra/feast-operator/internal/controller/authz/authz.go b/infra/feast-operator/internal/controller/authz/authz.go new file mode 100644 index 00000000000..122e9f69457 --- /dev/null +++ b/infra/feast-operator/internal/controller/authz/authz.go @@ -0,0 +1,206 @@ +package authz + +import ( + "context" + "slices" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" + rbacv1 "k8s.io/api/rbac/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// 
Deploy the feast authorization +func (authz *FeastAuthorization) Deploy() error { + if authz.isKubernetesAuth() { + return authz.deployKubernetesAuth() + } + + authz.removeOrphanedRoles() + _ = authz.Handler.DeleteOwnedFeastObj(authz.initFeastRole()) + _ = authz.Handler.DeleteOwnedFeastObj(authz.initFeastRoleBinding()) + apimeta.RemoveStatusCondition(&authz.Handler.FeatureStore.Status.Conditions, feastKubernetesAuthConditions[metav1.ConditionTrue].Type) + return nil +} + +func (authz *FeastAuthorization) isKubernetesAuth() bool { + authzConfig := authz.Handler.FeatureStore.Status.Applied.AuthzConfig + return authzConfig != nil && authzConfig.KubernetesAuthz != nil +} + +func (authz *FeastAuthorization) deployKubernetesAuth() error { + if authz.isKubernetesAuth() { + authz.removeOrphanedRoles() + + if err := authz.createFeastRole(); err != nil { + return authz.setFeastKubernetesAuthCondition(err) + } + if err := authz.createFeastRoleBinding(); err != nil { + return authz.setFeastKubernetesAuthCondition(err) + } + + for _, roleName := range authz.Handler.FeatureStore.Status.Applied.AuthzConfig.KubernetesAuthz.Roles { + if err := authz.createAuthRole(roleName); err != nil { + return authz.setFeastKubernetesAuthCondition(err) + } + } + } + return authz.setFeastKubernetesAuthCondition(nil) +} + +func (authz *FeastAuthorization) removeOrphanedRoles() { + roleList := &rbacv1.RoleList{} + err := authz.Handler.Client.List(context.TODO(), roleList, &client.ListOptions{ + Namespace: authz.Handler.FeatureStore.Namespace, + LabelSelector: labels.SelectorFromSet(authz.getLabels()), + }) + if err != nil { + return + } + + desiredRoles := []string{} + if authz.isKubernetesAuth() { + desiredRoles = authz.Handler.FeatureStore.Status.Applied.AuthzConfig.KubernetesAuthz.Roles + } + for _, role := range roleList.Items { + roleName := role.Name + if roleName != authz.getFeastRoleName() && !slices.Contains(desiredRoles, roleName) { + _ = 
authz.Handler.DeleteOwnedFeastObj(authz.initAuthRole(roleName)) + } + } +} + +func (authz *FeastAuthorization) createFeastRole() error { + logger := log.FromContext(authz.Handler.Context) + role := authz.initFeastRole() + if op, err := controllerutil.CreateOrUpdate(authz.Handler.Context, authz.Handler.Client, role, controllerutil.MutateFn(func() error { + return authz.setFeastRole(role) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "Role", role.Name, "operation", op) + } + + return nil +} + +func (authz *FeastAuthorization) initFeastRole() *rbacv1.Role { + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{Name: authz.getFeastRoleName(), Namespace: authz.Handler.FeatureStore.Namespace}, + } + role.SetGroupVersionKind(rbacv1.SchemeGroupVersion.WithKind("Role")) + return role +} + +func (authz *FeastAuthorization) setFeastRole(role *rbacv1.Role) error { + role.Labels = authz.getLabels() + role.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{rbacv1.GroupName}, + Resources: []string{"roles", "rolebindings"}, + Verbs: []string{"get", "list", "watch"}, + }, + } + + return controllerutil.SetControllerReference(authz.Handler.FeatureStore, role, authz.Handler.Scheme) +} + +func (authz *FeastAuthorization) createFeastRoleBinding() error { + logger := log.FromContext(authz.Handler.Context) + roleBinding := authz.initFeastRoleBinding() + if op, err := controllerutil.CreateOrUpdate(authz.Handler.Context, authz.Handler.Client, roleBinding, controllerutil.MutateFn(func() error { + return authz.setFeastRoleBinding(roleBinding) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "RoleBinding", roleBinding.Name, "operation", op) + } + + return nil +} + +func (authz *FeastAuthorization) initFeastRoleBinding() 
*rbacv1.RoleBinding { + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: authz.getFeastRoleName(), Namespace: authz.Handler.FeatureStore.Namespace}, + } + roleBinding.SetGroupVersionKind(rbacv1.SchemeGroupVersion.WithKind("RoleBinding")) + return roleBinding +} + +func (authz *FeastAuthorization) setFeastRoleBinding(roleBinding *rbacv1.RoleBinding) error { + roleBinding.Labels = authz.getLabels() + roleBinding.Subjects = []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: services.GetFeastName(authz.Handler.FeatureStore), + Namespace: authz.Handler.FeatureStore.Namespace, + }} + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: authz.getFeastRoleName(), + } + + return controllerutil.SetControllerReference(authz.Handler.FeatureStore, roleBinding, authz.Handler.Scheme) +} + +func (authz *FeastAuthorization) createAuthRole(roleName string) error { + logger := log.FromContext(authz.Handler.Context) + role := authz.initAuthRole(roleName) + if op, err := controllerutil.CreateOrUpdate(authz.Handler.Context, authz.Handler.Client, role, controllerutil.MutateFn(func() error { + return authz.setAuthRole(role) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "Role", role.Name, "operation", op) + } + + return nil +} + +func (authz *FeastAuthorization) initAuthRole(roleName string) *rbacv1.Role { + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{Name: roleName, Namespace: authz.Handler.FeatureStore.Namespace}, + } + role.SetGroupVersionKind(rbacv1.SchemeGroupVersion.WithKind("Role")) + return role +} + +func (authz *FeastAuthorization) setAuthRole(role *rbacv1.Role) error { + role.Labels = authz.getLabels() + role.Rules = []rbacv1.PolicyRule{} + + return controllerutil.SetControllerReference(authz.Handler.FeatureStore, role, authz.Handler.Scheme) +} + +func (authz 
*FeastAuthorization) getLabels() map[string]string { + return map[string]string{ + services.NameLabelKey: authz.Handler.FeatureStore.Name, + } +} + +func (authz *FeastAuthorization) setFeastKubernetesAuthCondition(err error) error { + if err != nil { + logger := log.FromContext(authz.Handler.Context) + cond := feastKubernetesAuthConditions[metav1.ConditionFalse] + cond.Message = "Error: " + err.Error() + apimeta.SetStatusCondition(&authz.Handler.FeatureStore.Status.Conditions, cond) + logger.Error(err, "Error deploying the Kubernetes authorization") + return err + } else { + apimeta.SetStatusCondition(&authz.Handler.FeatureStore.Status.Conditions, feastKubernetesAuthConditions[metav1.ConditionTrue]) + } + return nil +} + +func (authz *FeastAuthorization) getFeastRoleName() string { + return GetFeastRoleName(authz.Handler.FeatureStore) +} + +func GetFeastRoleName(featureStore *feastdevv1alpha1.FeatureStore) string { + return services.GetFeastName(featureStore) +} diff --git a/infra/feast-operator/internal/controller/authz/authz_types.go b/infra/feast-operator/internal/controller/authz/authz_types.go new file mode 100644 index 00000000000..f955f5b40f1 --- /dev/null +++ b/infra/feast-operator/internal/controller/authz/authz_types.go @@ -0,0 +1,28 @@ +package authz + +import ( + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FeastAuthorization is an interface for configuring feast authorization +type FeastAuthorization struct { + Handler handler.FeastHandler +} + +var ( + feastKubernetesAuthConditions = map[metav1.ConditionStatus]metav1.Condition{ + metav1.ConditionTrue: { + Type: feastdevv1alpha1.AuthorizationReadyType, + Status: metav1.ConditionTrue, + Reason: feastdevv1alpha1.ReadyReason, + Message: feastdevv1alpha1.KubernetesAuthzReadyMessage, + }, + metav1.ConditionFalse: { + Type: 
feastdevv1alpha1.AuthorizationReadyType, + Status: metav1.ConditionFalse, + Reason: feastdevv1alpha1.KubernetesAuthzFailedReason, + }, + } +) diff --git a/infra/feast-operator/internal/controller/featurestore_controller.go b/infra/feast-operator/internal/controller/featurestore_controller.go index 244bbcaae80..c3353c859f8 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller.go +++ b/infra/feast-operator/internal/controller/featurestore_controller.go @@ -23,6 +23,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,13 +31,15 @@ import ( "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" + handler "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/authz" + feasthandler "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" + routev1 "github.com/openshift/api/route/v1" ) // Constants for requeue @@ -50,11 +53,14 @@ type FeatureStoreReconciler struct { Scheme *runtime.Scheme } -//+kubebuilder:rbac:groups=feast.dev,resources=featurestores,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=feast.dev,resources=featurestores/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=feast.dev,resources=featurestores/finalizers,verbs=update -//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;create;update;watch;delete 
-//+kubebuilder:rbac:groups=core,resources=services;configmaps,verbs=get;list;create;update;watch;delete +// +kubebuilder:rbac:groups=feast.dev,resources=featurestores,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=feast.dev,resources=featurestores/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=feast.dev,resources=featurestores/finalizers,verbs=update +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;create;update;watch;delete +// +kubebuilder:rbac:groups=core,resources=services;configmaps;persistentvolumeclaims;serviceaccounts,verbs=get;list;create;update;watch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;create;update;watch;delete +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;create;update;watch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -77,11 +83,9 @@ func (r *FeatureStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request } currentStatus := cr.Status.DeepCopy() - // initial status defaults must occur before feast deployment - applyDefaultsToStatus(cr) result, recErr = r.deployFeast(ctx, cr) if cr.DeletionTimestamp == nil && !reflect.DeepEqual(currentStatus, cr.Status) { - if err := r.Client.Status().Update(ctx, cr); err != nil { + if err = r.Client.Status().Update(ctx, cr); err != nil { if apierrors.IsConflict(err) { logger.Info("FeatureStore object modified, retry syncing status") // Re-queue and preserve existing recErr @@ -106,21 +110,58 @@ func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1 Reason: feastdevv1alpha1.ReadyReason, Message: feastdevv1alpha1.ReadyMessage, } - feast := services.FeastServices{ - Client: r.Client, - Context: ctx, - FeatureStore: cr, - Scheme: r.Scheme, + Handler: feasthandler.FeastHandler{ + Client: r.Client, + Context: ctx, + FeatureStore: cr, + Scheme: r.Scheme, + }, + } + authz := authz.FeastAuthorization{ + Handler: feast.Handler, + } + + // status defaults must be applied before deployments + errResult := ctrl.Result{Requeue: true, RequeueAfter: RequeueDelayError} + if err = feast.ApplyDefaults(); err != nil { + result = errResult + } else if err = authz.Deploy(); err != nil { + result = errResult + } else if err = feast.Deploy(); err != nil { + result = errResult } - if err = feast.Deploy(); err != nil { + if err != nil { condition = metav1.Condition{ Type: feastdevv1alpha1.ReadyType, Status: metav1.ConditionFalse, Reason: feastdevv1alpha1.FailedReason, Message: "Error: " + err.Error(), } - result = ctrl.Result{Requeue: true, RequeueAfter: RequeueDelayError} + } else { + deployment, deploymentErr := feast.GetDeployment() + if deploymentErr != nil { + condition = metav1.Condition{ + Type: feastdevv1alpha1.ReadyType, + Status: metav1.ConditionUnknown, + Reason: feastdevv1alpha1.DeploymentNotAvailableReason, + Message: 
feastdevv1alpha1.DeploymentNotAvailableMessage, + } + + result = errResult + } else { + isDeployAvailable := services.IsDeploymentAvailable(deployment.Status.Conditions) + if !isDeployAvailable { + condition = metav1.Condition{ + Type: feastdevv1alpha1.ReadyType, + Status: metav1.ConditionUnknown, + Reason: feastdevv1alpha1.DeploymentNotAvailableReason, + Message: feastdevv1alpha1.DeploymentNotAvailableMessage, + } + + result = errResult + } + } } logger.Info(condition.Message) @@ -138,13 +179,22 @@ func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1 // SetupWithManager sets up the controller with the Manager. func (r *FeatureStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). + bldr := ctrl.NewControllerManagedBy(mgr). For(&feastdevv1alpha1.FeatureStore{}). Owns(&corev1.ConfigMap{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). - Watches(&feastdevv1alpha1.FeatureStore{}, handler.EnqueueRequestsFromMapFunc(r.mapFeastRefsToFeastRequests)). - Complete(r) + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&rbacv1.Role{}). + Watches(&feastdevv1alpha1.FeatureStore{}, handler.EnqueueRequestsFromMapFunc(r.mapFeastRefsToFeastRequests)) + if services.IsOpenShift() { + bldr = bldr.Owns(&routev1.Route{}) + } + + return bldr.Complete(r) + } // if a remotely referenced FeatureStore is changed, reconcile any FeatureStores that reference it. 
@@ -167,11 +217,12 @@ func (r *FeatureStoreReconciler) mapFeastRefsToFeastRequests(ctx context.Context // this if statement is extra protection against any potential infinite reconcile loops if feastRefNsName != objNsName { feast := services.FeastServices{ - Client: r.Client, - Context: ctx, - FeatureStore: &obj, - Scheme: r.Scheme, - } + Handler: feasthandler.FeastHandler{ + Client: r.Client, + Context: ctx, + FeatureStore: &obj, + Scheme: r.Scheme, + }} if feast.IsRemoteRefRegistry() { remoteRef := obj.Status.Applied.Services.Registry.Remote.FeastRef remoteRefNsName := types.NamespacedName{Name: remoteRef.Name, Namespace: remoteRef.Namespace} @@ -184,39 +235,3 @@ func (r *FeatureStoreReconciler) mapFeastRefsToFeastRequests(ctx context.Context return requests } - -func applyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { - cr.Status.FeastVersion = feastversion.FeastVersion - applied := cr.Spec.DeepCopy() - if applied.Services == nil { - applied.Services = &feastdevv1alpha1.FeatureStoreServices{} - } - - // default to registry service deployment - if applied.Services.Registry == nil { - applied.Services.Registry = &feastdevv1alpha1.Registry{} - } - // if remote registry not set, proceed w/ local registry defaults - if applied.Services.Registry.Remote == nil { - // if local registry not set, apply an empty pointer struct - if applied.Services.Registry.Local == nil { - applied.Services.Registry.Local = &feastdevv1alpha1.LocalRegistryConfig{} - } - setServiceDefaultConfigs(&applied.Services.Registry.Local.ServiceConfigs.DefaultConfigs) - } - if applied.Services.OfflineStore != nil { - setServiceDefaultConfigs(&applied.Services.OfflineStore.ServiceConfigs.DefaultConfigs) - } - if applied.Services.OnlineStore != nil { - setServiceDefaultConfigs(&applied.Services.OnlineStore.ServiceConfigs.DefaultConfigs) - } - - // overwrite status.applied with every reconcile - applied.DeepCopyInto(&cr.Status.Applied) -} - -func setServiceDefaultConfigs(defaultConfigs 
*feastdevv1alpha1.DefaultConfigs) { - if defaultConfigs.Image == nil { - defaultConfigs.Image = &services.DefaultImage - } -} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go new file mode 100644 index 00000000000..390c225f613 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go @@ -0,0 +1,725 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var cassandraYamlString = ` +hosts: + - 192.168.1.1 + - 192.168.1.2 + - 192.168.1.3 +keyspace: KeyspaceName +port: 9042 +username: user +password: secret +protocol_version: 5 +load_balancing: + local_dc: datacenter1 + load_balancing_policy: TokenAwarePolicy(DCAwareRoundRobinPolicy) +read_concurrency: 100 +write_concurrency: 100 +` + +var snowflakeYamlString = ` +account: snowflake_deployment.us-east-1 +user: user_login +password: user_password +role: SYSADMIN +warehouse: COMPUTE_WH +database: FEAST +schema: PUBLIC +` + +var sqlTypeYamlString = ` +path: postgresql://postgres:mysecretpassword@127.0.0.1:55001/feast +cache_ttl_seconds: 60 +sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true +` + +var secretContainingValidTypeYamlString = ` +type: cassandra +hosts: + - 192.168.1.1 + - 192.168.1.2 + - 192.168.1.3 +keyspace: KeyspaceName +port: 9042 +username: user +password: secret +protocol_version: 5 +load_balancing: + local_dc: datacenter1 + load_balancing_policy: TokenAwarePolicy(DCAwareRoundRobinPolicy) +read_concurrency: 100 +write_concurrency: 100 +` + +var invalidSecretTypeYamlString = ` +type: wrong +hosts: + - 192.168.1.1 + - 192.168.1.2 + - 192.168.1.3 +keyspace: KeyspaceName 
+port: 9042 +username: user +password: secret +protocol_version: 5 +load_balancing: + local_dc: datacenter1 + load_balancing_policy: TokenAwarePolicy(DCAwareRoundRobinPolicy) +read_concurrency: 100 +write_concurrency: 100 +` + +var invalidSecretRegistryTypeYamlString = ` +registry_type: sql +path: postgresql://postgres:mysecretpassword@127.0.0.1:55001/feast +cache_ttl_seconds: 60 +sqlalchemy_config_kwargs: + echo: false + pool_pre_ping: true +` + +var _ = Describe("FeatureStore Controller - db storage services", func() { + Context("When deploying a resource with all db storage services", func() { + const resourceName = "cr-name" + var pullPolicy = corev1.PullAlways + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + + offlineSecretNamespacedName := types.NamespacedName{ + Name: "offline-store-secret", + Namespace: "default", + } + + onlineSecretNamespacedName := types.NamespacedName{ + Name: "online-store-secret", + Namespace: "default", + } + + registrySecretNamespacedName := types.NamespacedName{ + Name: "registry-store-secret", + Namespace: "default", + } + + featurestore := &feastdevv1alpha1.FeatureStore{} + offlineType := services.OfflineDBPersistenceSnowflakeConfigType + onlineType := services.OnlineDBPersistenceCassandraConfigType + registryType := services.RegistryDBPersistenceSQLConfigType + + BeforeEach(func() { + By("creating secrets for db stores for custom resource of Kind FeatureStore") + secret := &corev1.Secret{} + + secretData := map[string][]byte{ + string(offlineType): []byte(snowflakeYamlString), + } + err := k8sClient.Get(ctx, offlineSecretNamespacedName, secret) + if err != nil && errors.IsNotFound(err) { + secret.ObjectMeta = metav1.ObjectMeta{ + Name: offlineSecretNamespacedName.Name, + Namespace: offlineSecretNamespacedName.Namespace, + } + secret.Data = secretData + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + secret = &corev1.Secret{} + + 
secretData = map[string][]byte{ + string(onlineType): []byte(cassandraYamlString), + } + err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) + if err != nil && errors.IsNotFound(err) { + secret.ObjectMeta = metav1.ObjectMeta{ + Name: onlineSecretNamespacedName.Name, + Namespace: onlineSecretNamespacedName.Namespace, + } + secret.Data = secretData + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + secret = &corev1.Secret{} + + secretData = map[string][]byte{ + "sql_custom_registry_key": []byte(sqlTypeYamlString), + } + err = k8sClient.Get(ctx, registrySecretNamespacedName, secret) + if err != nil && errors.IsNotFound(err) { + secret.ObjectMeta = metav1.ObjectMeta{ + Name: registrySecretNamespacedName.Name, + Namespace: registrySecretNamespacedName.Namespace, + } + secret.Data = secretData + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + createEnvFromSecretAndConfigMap() + + By("creating the custom resource for the Kind FeatureStore") + err = k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}, withEnvFrom()) + resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ + Type: string(offlineType), + SecretRef: corev1.LocalObjectReference{ + Name: "offline-store-secret", + }, + }, + } + resource.Spec.Services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OnlineStoreDBStorePersistence{ + Type: string(onlineType), + SecretRef: corev1.LocalObjectReference{ + Name: "online-store-secret", + }, + }, + } + resource.Spec.Services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{ + DBPersistence: &feastdevv1alpha1.RegistryDBStorePersistence{ + Type: string(registryType), + SecretRef: corev1.LocalObjectReference{ + Name: 
"registry-store-secret", + }, + SecretKeyName: "sql_custom_registry_key", + }, + } + + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + onlineSecret := &corev1.Secret{} + err := k8sClient.Get(ctx, onlineSecretNamespacedName, onlineSecret) + Expect(err).NotTo(HaveOccurred()) + + offlineSecret := &corev1.Secret{} + err = k8sClient.Get(ctx, offlineSecretNamespacedName, offlineSecret) + Expect(err).NotTo(HaveOccurred()) + + registrySecret := &corev1.Secret{} + err = k8sClient.Get(ctx, registrySecretNamespacedName, registrySecret) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + deleteEnvFromSecretAndConfigMap() + + By("Cleanup the secrets") + Expect(k8sClient.Delete(ctx, onlineSecret)).To(Succeed()) + Expect(k8sClient.Delete(ctx, offlineSecret)).To(Succeed()) + Expect(k8sClient.Delete(ctx, registrySecret)).To(Succeed()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should fail reconciling the resource", func() { + By("Referring to a secret that doesn't exist") + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "invalid_secret"} + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).To(HaveOccurred()) + + Expect(err.Error()).To(Equal("secrets \"invalid_secret\" not found")) + + By("Referring to a secret with a key that doesn't exist") + resource = &feastdevv1alpha1.FeatureStore{} + err = 
k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "online-store-secret"} + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = "invalid.secret.key" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).To(HaveOccurred()) + + Expect(err.Error()).To(Equal("secret key invalid.secret.key doesn't exist in secret online-store-secret")) + + By("Referring to a secret that contains parameter named type with invalid value") + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{} + err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(invalidSecretTypeYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "online-store-secret"} + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = "" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).To(HaveOccurred()) + + Expect(err.Error()).To(Equal("secret key cassandra in secret online-store-secret contains tag named type with value wrong")) + }) + + It("should 
successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.DBPersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.DBPersistence.Type).To(Equal(string(offlineType))) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.DBPersistence.SecretRef).To(Equal(corev1.LocalObjectReference{Name: "offline-store-secret"})) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.DBPersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.DBPersistence.Type).To(Equal(string(onlineType))) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.DBPersistence.SecretRef).To(Equal(corev1.LocalObjectReference{Name: "online-store-secret"})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.DBPersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.DBPersistence.Type).To(Equal(string(registryType))) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.DBPersistence.SecretRef).To(Equal(corev1.LocalObjectReference{Name: "registry-store-secret"})) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.DBPersistence.SecretKeyName).To(Equal("sql_custom_registry_key")) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." 
+ resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." + resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = 
apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + svc := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.RegistryFeastType), + Namespace: resource.Namespace, + }, + svc) + Expect(err).NotTo(HaveOccurred()) + Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) + Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetHttpPort)))) + + By("Referring to a secret that contains parameter named type") + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{} + err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(secretContainingValidTypeYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "online-store-secret"} + 
resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = "" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + + Expect(err).To(Not(HaveOccurred())) + + By("Referring to a secret that contains parameter named registry_type") + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + secret = &corev1.Secret{} + err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(cassandraYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + secret = &corev1.Secret{} + err = k8sClient.Get(ctx, registrySecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data["sql_custom_registry_key"] = nil + secret.Data[string(services.RegistryDBPersistenceSQLConfigType)] = []byte(invalidSecretRegistryTypeYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "registry-store-secret"} + resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretKeyName = "" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).To(Not(HaveOccurred())) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := 
&FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(4)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + 
Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + dbParametersMap := unmarshallYamlString(sqlTypeYamlString) + copyMap := services.CopyMap(dbParametersMap) + delete(dbParametersMap, "path") + testConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: services.OfflineStoreConfig{ + Type: services.OfflineDBPersistenceSnowflakeConfigType, + DBParameters: unmarshallYamlString(snowflakeYamlString), + }, + Registry: services.RegistryConfig{ + Path: copyMap["path"].(string), + RegistryType: services.RegistryDBPersistenceSQLConfigType, + DBParameters: dbParametersMap, + }, + OnlineStore: services.OnlineStoreConfig{ + Type: onlineType, + DBParameters: unmarshallYamlString(cassandraYamlString), + }, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfig).To(Equal(testConfig)) + + offlineContainer := services.GetOfflineContainer(*deploy) + Expect(offlineContainer.Env).To(HaveLen(1)) + assertEnvFrom(*offlineContainer) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(testConfig)) + + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.VolumeMounts).To(HaveLen(1)) + Expect(onlineContainer.Env).To(HaveLen(1)) + assertEnvFrom(*onlineContainer) + Expect(onlineContainer.ImagePullPolicy).To(Equal(corev1.PullAlways)) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + 
Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOnline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOnline).To(Equal(testConfig)) + onlineContainer = services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(1)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + offlineRemote := services.OfflineStoreConfig{ + Host: fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + } + regRemote := services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName), + } + clientConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: offlineRemote, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + Type: services.OnlineRemoteConfigType, + }, + Registry: regRemote, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfigClient).To(Equal(clientConfig)) + + // change paths and reconcile + resourceNew := resource.DeepCopy() + newOnlineSecretName := "offline-store-secret" + 
newOnlineDBPersistenceType := services.OnlineDBPersistenceSnowflakeConfigType + resourceNew.Spec.Services.OnlineStore.Persistence.DBPersistence.Type = string(newOnlineDBPersistenceType) + resourceNew.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: newOnlineSecretName} + resourceNew.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = string(services.OfflineDBPersistenceSnowflakeConfigType) + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check online config + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + onlineContainer = services.GetOnlineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + + repoConfigOnline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + testConfig.OnlineStore.Type = services.OnlineDBPersistenceSnowflakeConfigType + testConfig.OnlineStore.DBParameters = unmarshallYamlString(snowflakeYamlString) + Expect(repoConfigOnline).To(Equal(testConfig)) + }) + }) +}) + +func unmarshallYamlString(yamlString string) map[string]interface{} { + var parameters map[string]interface{} + + err := yaml.Unmarshal([]byte(yamlString), ¶meters) + if err != nil { + fmt.Println(err) + } + return 
parameters +} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go new file mode 100644 index 00000000000..a0c01c11449 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go @@ -0,0 +1,458 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller-Ephemeral services", func() { + Context("When deploying a resource with all ephemeral services", func() { + const resourceName = "services-ephemeral" + const offlineType = "duckdb" + var pullPolicy = corev1.PullAlways + var testEnvVarName = "testEnvVarName" + var testEnvVarValue = "testEnvVarValue" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + onlineStorePath := "/data/online.db" + registryPath := "/data/registry.db" + + BeforeEach(func() { + createEnvFromSecretAndConfigMap() + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}, withEnvFrom()) + 
resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + Type: offlineType, + }, + } + resource.Spec.Services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: onlineStorePath, + }, + } + resource.Spec.Services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: registryPath, + }, + } + + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + deleteEnvFromSecretAndConfigMap() + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + 
Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.Type).To(Equal(offlineType)) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.Path).To(Equal(onlineStorePath)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Env).To(Equal(&[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(registryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." 
+ resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + 
Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + svc := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.RegistryFeastType), + Namespace: resource.Namespace, + }, + svc) + Expect(err).NotTo(HaveOccurred()) + Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) + Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetHttpPort)))) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = 
k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(4)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDuckDbConfigType, + }, + Registry: services.RegistryConfig{ + RegistryType: services.RegistryFileConfigType, + Path: registryPath, + }, + OnlineStore: services.OnlineStoreConfig{ + Path: onlineStorePath, + Type: 
services.OnlineSqliteConfigType, + }, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfig).To(Equal(testConfig)) + + offlineContainer := services.GetOfflineContainer(*deploy) + Expect(offlineContainer.Env).To(HaveLen(1)) + assertEnvFrom(*offlineContainer) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + // check envFrom for offlineContainer + assertEnvFrom(*offlineContainer) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(testConfig)) + + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(3)) + Expect(onlineContainer.ImagePullPolicy).To(Equal(corev1.PullAlways)) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOnline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOnline).To(Equal(testConfig)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + clientConfig := &services.RepoConfig{ + 
Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: services.OfflineStoreConfig{ + Host: fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + }, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + Type: services.OnlineRemoteConfigType, + }, + Registry: services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName), + }, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfigClient).To(Equal(clientConfig)) + + // change paths and reconcile + resourceNew := resource.DeepCopy() + newOnlineStorePath := "/data/new_online.db" + newRegistryPath := "/data/new_registry.db" + resourceNew.Spec.Services.OnlineStore.Persistence.FilePersistence.Path = newOnlineStorePath + resourceNew.Spec.Services.Registry.Local.Persistence.FilePersistence.Path = newRegistryPath + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check registry + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + registryContainer = services.GetRegistryContainer(*deploy) + env = getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + 
envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig.OnlineStore.Path = newOnlineStorePath + testConfig.Registry.Path = newRegistryPath + Expect(repoConfig).To(Equal(testConfig)) + + // check offline config + offlineContainer = services.GetRegistryContainer(*deploy) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(testConfig)) + + // check online config + onlineContainer = services.GetOnlineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + // check envFrom + // Validate `envFrom` for ConfigMap and Secret + assertEnvFrom(*onlineContainer) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + + repoConfigOnline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + testConfig.OnlineStore.Path = newOnlineStorePath + Expect(repoConfigOnline).To(Equal(testConfig)) + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go new file mode 100644 index 00000000000..dd799a8c8e8 --- /dev/null +++ 
b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go @@ -0,0 +1,499 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/authz" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { + Context("When deploying a resource with all ephemeral services and Kubernetes authorization", func() { + const resourceName = "kubernetes-authorization" + var pullPolicy = corev1.PullAlways 
+ + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + roles := []string{"reader", "writer"} + + BeforeEach(func() { + createEnvFromSecretAndConfigMap() + + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}, withEnvFrom()) + resource.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{KubernetesAuthz: &feastdevv1alpha1.KubernetesAuthz{ + Roles: roles, + }} + + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + deleteEnvFromSecretAndConfigMap() + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + 
Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + expectedAuthzConfig := &feastdevv1alpha1.AuthzConfig{ + KubernetesAuthz: &feastdevv1alpha1.KubernetesAuthz{ + Roles: roles, + }, + } + Expect(resource.Status.Applied.AuthzConfig).To(Equal(expectedAuthzConfig)) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.Type).To(Equal(string(services.OfflineFilePersistenceDaskConfigType))) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + services.DefaultOnlineStorePath)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Env).To(Equal(&[]corev1.EnvVar{})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + services.DefaultRegistryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." 
+ resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.AuthorizationReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.KubernetesAuthzReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + 
Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers[0].VolumeMounts).To(HaveLen(1)) + + // check configured Roles + for _, roleName := range roles { + role := &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: roleName, + Namespace: resource.Namespace, + }, + role) + Expect(err).NotTo(HaveOccurred()) + Expect(role.Rules).To(BeEmpty()) + } + + // check Feast Role + feastRole := &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + feastRole) + Expect(err).NotTo(HaveOccurred()) + Expect(feastRole.Rules).ToNot(BeEmpty()) + Expect(feastRole.Rules).To(HaveLen(1)) + Expect(feastRole.Rules[0].APIGroups).To(HaveLen(1)) + Expect(feastRole.Rules[0].APIGroups[0]).To(Equal(rbacv1.GroupName)) + Expect(feastRole.Rules[0].Resources).To(HaveLen(2)) + Expect(feastRole.Rules[0].Resources).To(ContainElement("roles")) + 
Expect(feastRole.Rules[0].Resources).To(ContainElement("rolebindings")) + Expect(feastRole.Rules[0].Verbs).To(HaveLen(3)) + Expect(feastRole.Rules[0].Verbs).To(ContainElement("get")) + Expect(feastRole.Rules[0].Verbs).To(ContainElement("list")) + Expect(feastRole.Rules[0].Verbs).To(ContainElement("watch")) + + // check RoleBinding + roleBinding := &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + roleBinding) + Expect(err).NotTo(HaveOccurred()) + + // check ServiceAccounts + expectedRoleRef := rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: feastRole.Name, + } + sa := &corev1.ServiceAccount{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: services.GetFeastName(feast.Handler.FeatureStore), + Namespace: resource.Namespace, + }, + sa) + Expect(err).NotTo(HaveOccurred()) + + expectedSubject := rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Name: sa.Name, + Namespace: sa.Namespace, + } + Expect(roleBinding.Subjects).To(ContainElement(expectedSubject)) + Expect(roleBinding.RoleRef).To(Equal(expectedRoleRef)) + + By("Updating the user roled and reconciling") + resourceNew := resource.DeepCopy() + rolesNew := roles[1:] + resourceNew.Spec.AuthzConfig.KubernetesAuthz.Roles = rolesNew + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check new Roles + for _, roleName := range rolesNew { + role := &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: roleName, + Namespace: resource.Namespace, + }, + role) + Expect(err).NotTo(HaveOccurred()) + 
Expect(role.Rules).To(BeEmpty()) + } + + // check deleted Role + role := &rbacv1.Role{} + deletedRole := roles[0] + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: deletedRole, + Namespace: resource.Namespace, + }, + role) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + + By("Clearing the kubernetes authorization and reconciling") + resourceNew = resource.DeepCopy() + resourceNew.Spec.AuthzConfig = nil + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check no Roles + for _, roleName := range roles { + role := &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: roleName, + Namespace: resource.Namespace, + }, + role) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + } + // check no RoleBinding + roleBinding = &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + roleBinding) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, 
selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(4)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check registry + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + env := getFeatureStoreYamlEnvVar(services.GetRegistryContainer(*deploy).Env) + Expect(env).NotTo(BeNil()) + + // check registry config + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig := feast.GetDefaultRepoConfig() + testConfig.OfflineStore = services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDaskConfigType, + } + testConfig.Registry.RegistryType = services.RegistryFileConfigType + testConfig.AuthzConfig = services.AuthzConfig{ + Type: services.KubernetesAuthType, + } + 
Expect(repoConfig).To(Equal(&testConfig)) + + // check offline + offlineContainer := services.GetOfflineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + assertEnvFrom(*offlineContainer) + + // check offline config + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig).To(Equal(&testConfig)) + + // check online + onlineContainer := services.GetOnlineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + assertEnvFrom(*onlineContainer) + + // check online config + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig).To(Equal(&testConfig)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + regRemote := services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName), + } + offlineRemote := services.OfflineStoreConfig{ + Host: 
fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + } + clientConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: offlineRemote, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + Type: services.OnlineRemoteConfigType, + }, + Registry: regRemote, + AuthzConfig: services.AuthzConfig{ + Type: services.KubernetesAuthType, + }, + } + Expect(repoConfigClient).To(Equal(clientConfig)) + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go b/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go new file mode 100644 index 00000000000..90b80c907d5 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go @@ -0,0 +1,250 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller - Feast service LogLevel", func() { + Context("When reconciling a FeatureStore resource", func() { + const resourceName = "test-loglevel" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + + BeforeEach(func() { + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: feastProject, + Services: &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{ + LogLevel: strPtr("error"), + }, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + LogLevel: strPtr("debug"), + }, + }, + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + LogLevel: strPtr("info"), + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{ + LogLevel: strPtr("info"), + }, + }, + }, + } + 
Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should successfully reconcile the resource with logLevel", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = 
apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + 
Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + command := services.GetRegistryContainer(*deploy).Command + Expect(command).To(ContainElement("--log-level")) + Expect(command).To(ContainElement("ERROR")) + + command = services.GetOfflineContainer(*deploy).Command + Expect(command).To(ContainElement("--log-level")) + Expect(command).To(ContainElement("INFO")) + + command = services.GetOnlineContainer(*deploy).Command + Expect(command).To(ContainElement("--log-level")) + Expect(command).To(ContainElement("DEBUG")) + }) + + It("should not include --log-level parameter when logLevel is not specified for any service", func() { + By("Updating the FeatureStore resource without specifying logLevel for any service") + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + }, + OfflineStore: &feastdevv1alpha1.OfflineStore{}, + UI: &feastdevv1alpha1.ServerConfigs{}, + } + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + 
Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(3)) + command := services.GetRegistryContainer(*deploy).Command + Expect(command).NotTo(ContainElement("--log-level")) + + command = services.GetOnlineContainer(*deploy).Command + Expect(command).NotTo(ContainElement("--log-level")) + + command = services.GetUIContainer(*deploy).Command + Expect(command).NotTo(ContainElement("--log-level")) + }) + + }) +}) + +func strPtr(str string) *string { + return &str +} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go new file mode 100644 index 00000000000..81dc15d8545 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller-Ephemeral services", func() { + Context("When deploying a resource with all ephemeral services", func() { + const resourceName = "services-object-store" + var pullPolicy = corev1.PullAlways + var testEnvVarName = "testEnvVarName" + var testEnvVarValue = "testEnvVarValue" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + registryPath := "s3://bucket/registry.db" + + s3AdditionalKwargs := map[string]string{ + "key1": "value1", + "key2": "value2", + } + + BeforeEach(func() { + createEnvFromSecretAndConfigMap() + + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}, withEnvFrom()) + 
resource.Spec.Services.UI = nil + resource.Spec.Services.OfflineStore = nil + resource.Spec.Services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: registryPath, + S3AdditionalKwargs: &s3AdditionalKwargs, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + deleteEnvFromSecretAndConfigMap() + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).To(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.UI).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(registryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).To(Equal(&s3AdditionalKwargs)) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(BeEmpty()) + Expect(resource.Status.ServiceHostnames.OnlineStore).NotTo(BeEmpty()) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." 
+ resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).NotTo(BeNil()) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + 
Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(2)) + Expect(services.GetRegistryContainer(*deploy)).NotTo(BeNil()) + Expect(services.GetOnlineContainer(*deploy)).NotTo(BeNil()) + Expect(services.GetOfflineContainer(*deploy)).To(BeNil()) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers[0].VolumeMounts).To(HaveLen(1)) + + // update S3 additional args and reconcile + resourceNew := resource.DeepCopy() + newS3AdditionalKwargs := make(map[string]string) + for k, v := range s3AdditionalKwargs { + newS3AdditionalKwargs[k] = v + } + newS3AdditionalKwargs["key3"] = "value3" + resourceNew.Spec.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs = &newS3AdditionalKwargs + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).NotTo(Equal(&s3AdditionalKwargs)) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).To(Equal(&newS3AdditionalKwargs)) + + // check registry deployment + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + 
Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.VolumeMounts).To(HaveLen(1)) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(2)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + 
Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(2)) + Expect(services.GetRegistryContainer(*deploy)).NotTo(BeNil()) + Expect(services.GetOnlineContainer(*deploy)).NotTo(BeNil()) + Expect(services.GetOfflineContainer(*deploy)).To(BeNil()) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers[0].VolumeMounts).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) + Expect(env).NotTo(BeNil()) + + // check registry config + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig := feast.GetDefaultRepoConfig() + testConfig.Registry = services.RegistryConfig{ + RegistryType: services.RegistryFileConfigType, + Path: registryPath, + S3AdditionalKwargs: &s3AdditionalKwargs, + } + Expect(repoConfig).To(Equal(&testConfig)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + clientConfig := feast.GetInitRepoConfig() + clientConfig.OnlineStore = services.OnlineStoreConfig{ + Type: services.OnlineRemoteConfigType, + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + } + clientConfig.Registry = services.RegistryConfig{ + 
RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName), + } + Expect(repoConfigClient).To(Equal(&clientConfig)) + + // remove S3 additional keywords and reconcile + resourceNew := resource.DeepCopy() + resourceNew.Spec.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs = nil + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check registry config + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + env = getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) + Expect(env).NotTo(BeNil()) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig.Registry.S3AdditionalKwargs = nil + Expect(repoConfig).To(Equal(&testConfig)) + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go new file mode 100644 index 00000000000..f192d07cd08 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go @@ -0,0 +1,537 @@ +/* +Copyright 2024 Feast Community. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/authz" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller-OIDC authorization", func() { + Context("When deploying a resource with all ephemeral services and OIDC authorization", func() { + const resourceName = "oidc-authorization" + const oidcSecretName = "oidc-secret" + var pullPolicy = corev1.PullAlways + + ctx := context.Background() + + typeNamespacedSecretName := types.NamespacedName{ + Name: oidcSecretName, + Namespace: 
"default",
	}
	// NamespacedName of the FeatureStore CR under test.
	typeNamespacedName := types.NamespacedName{
		Name:      resourceName,
		Namespace: "default",
	}
	featurestore := &feastdevv1alpha1.FeatureStore{}

	// BeforeEach provisions the OIDC credentials secret, the env-source
	// secret/configmap, and the FeatureStore CR (with OIDC authz pointing at
	// that secret). Each object is created only if it does not already exist,
	// so specs within this Context can share state safely.
	BeforeEach(func() {
		By("creating the OIDC secret")
		oidcSecret := createValidOidcSecret(oidcSecretName)
		err := k8sClient.Get(ctx, typeNamespacedSecretName, oidcSecret)
		if err != nil && errors.IsNotFound(err) {
			Expect(k8sClient.Create(ctx, oidcSecret)).To(Succeed())
		}

		createEnvFromSecretAndConfigMap()

		By("creating the custom resource for the Kind FeatureStore")
		err = k8sClient.Get(ctx, typeNamespacedName, featurestore)
		if err != nil && errors.IsNotFound(err) {
			resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}, withEnvFrom())
			resource.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{OidcAuthz: &feastdevv1alpha1.OidcAuthz{
				SecretRef: corev1.LocalObjectReference{
					Name: oidcSecretName,
				},
			}}

			Expect(k8sClient.Create(ctx, resource)).To(Succeed())
		}

	})
	// AfterEach removes the FeatureStore CR, the OIDC secret, and the
	// env-source objects so later Contexts start from a clean namespace.
	AfterEach(func() {
		resource := &feastdevv1alpha1.FeatureStore{}
		err := k8sClient.Get(ctx, typeNamespacedName, resource)
		Expect(err).NotTo(HaveOccurred())

		oidcSecret := createValidOidcSecret(oidcSecretName)
		err = k8sClient.Get(ctx, typeNamespacedSecretName, oidcSecret)
		// BUGFIX: the original condition was `err != nil && errors.IsNotFound(err)`,
		// i.e. it attempted the Delete only when the secret was ALREADY gone --
		// the branch could never succeed and the secret leaked across specs.
		// Delete when Get succeeded (the secret still exists).
		if err == nil {
			By("Cleanup the OIDC secret")
			Expect(k8sClient.Delete(ctx, oidcSecret)).To(Succeed())
		}

		By("Cleanup the specific resource instance FeatureStore")
		Expect(k8sClient.Delete(ctx, resource)).To(Succeed())

		deleteEnvFromSecretAndConfigMap()
	})

	It("should successfully reconcile the resource", func() {
		By("Reconciling the created resource")
		controllerReconciler := &FeatureStoreReconciler{
			Client: k8sClient,
			Scheme: k8sClient.Scheme(),
		}

		_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
			NamespacedName: typeNamespacedName,
		})
		Expect(err).NotTo(HaveOccurred())

		resource := &feastdevv1alpha1.FeatureStore{}
err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + expectedAuthzConfig := &feastdevv1alpha1.AuthzConfig{ + OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + SecretRef: corev1.LocalObjectReference{ + Name: oidcSecretName, + }, + }, + } + Expect(resource.Status.Applied.AuthzConfig).To(Equal(expectedAuthzConfig)) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.Type).To(Equal(string(services.OfflineFilePersistenceDaskConfigType))) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + 
services.DefaultOnlineStorePath)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Env).To(Equal(&[]corev1.EnvVar{})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + services.DefaultRegistryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." 
+ resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + 
Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) + Expect(services.GetOfflineContainer(*deploy).VolumeMounts).To(HaveLen(1)) + Expect(services.GetOnlineContainer(*deploy).VolumeMounts).To(HaveLen(1)) + Expect(services.GetRegistryContainer(*deploy).VolumeMounts).To(HaveLen(1)) + + assertEnvFrom(*services.GetOnlineContainer(*deploy)) + assertEnvFrom(*services.GetOfflineContainer(*deploy)) + + // check Feast Role + feastRole := &rbacv1.Role{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + feastRole) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + + // check RoleBinding + roleBinding := &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + roleBinding) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + + // check ServiceAccount + sa := &corev1.ServiceAccount{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: services.GetFeastName(feast.Handler.FeatureStore), + Namespace: resource.Namespace, + }, + sa) + Expect(err).NotTo(HaveOccurred()) 
+ + By("Clearing the OIDC authorization and reconciling") + resourceNew := resource.DeepCopy() + resourceNew.Spec.AuthzConfig = nil + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check no RoleBinding + roleBinding = &rbacv1.RoleBinding{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: authz.GetFeastRoleName(resource), + Namespace: resource.Namespace, + }, + roleBinding) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(4)) + + cmList := corev1.ConfigMapList{} + err = 
k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + env := getFeatureStoreYamlEnvVar(services.GetRegistryContainer(*deploy).Env) + Expect(env).NotTo(BeNil()) + + // check registry config + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDaskConfigType, + }, + Registry: services.RegistryConfig{ + RegistryType: services.RegistryFileConfigType, + Path: services.EphemeralPath + "/" + services.DefaultRegistryPath, + }, + OnlineStore: services.OnlineStoreConfig{ + Path: services.EphemeralPath + "/" + services.DefaultOnlineStorePath, + Type: services.OnlineSqliteConfigType, + }, + AuthzConfig: expectedServerOidcAuthorizConfig(), + } + Expect(repoConfig).To(Equal(testConfig)) + + // check offline + env = getFeatureStoreYamlEnvVar(services.GetOfflineContainer(*deploy).Env) + Expect(env).NotTo(BeNil()) + + // check offline config + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) 
+ Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig).To(Equal(testConfig)) + + // check online + env = getFeatureStoreYamlEnvVar(services.GetOnlineContainer(*deploy).Env) + Expect(env).NotTo(BeNil()) + + // check online config + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig).To(Equal(testConfig)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + offlineRemote := services.OfflineStoreConfig{ + Host: fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + } + clientConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: offlineRemote, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + Type: services.OnlineRemoteConfigType, + }, + Registry: services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: 
fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName),
				},
				AuthzConfig: expectedClientOidcAuthorizConfig(),
			}
			Expect(repoConfigClient).To(Equal(clientConfig))
		})

		// Negative path: an OIDC secret missing a mandatory property must make
		// reconciliation fail and surface a Failed/"missing OIDC" condition.
		It("should fail to reconcile the resource", func() {
			By("Reconciling an invalid OIDC set of properties")
			controllerReconciler := &FeatureStoreReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			newOidcSecretName := "invalid-secret"
			// FIX: renamed from the misspelled `newTypeNamespaceSecretdName`.
			newTypeNamespacedSecretName := types.NamespacedName{
				Name:      newOidcSecretName,
				Namespace: "default",
			}
			newOidcSecret := createInvalidOidcSecret(newOidcSecretName)
			err := k8sClient.Get(ctx, newTypeNamespacedSecretName, newOidcSecret)
			if err != nil && errors.IsNotFound(err) {
				Expect(k8sClient.Create(ctx, newOidcSecret)).To(Succeed())
			}

			resource := &feastdevv1alpha1.FeatureStore{}
			err = k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			// Point the CR at the invalid secret and reconcile again.
			resource.Spec.AuthzConfig.OidcAuthz.SecretRef.Name = newOidcSecretName
			err = k8sClient.Update(ctx, resource)
			Expect(err).NotTo(HaveOccurred())
			_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).To(HaveOccurred())

			resource = &feastdevv1alpha1.FeatureStore{}
			err = k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())
			Expect(resource.Status.Conditions).NotTo(BeEmpty())
			cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType)
			Expect(cond).ToNot(BeNil())
			Expect(cond.Status).To(Equal(metav1.ConditionFalse))
			Expect(cond.Reason).To(Equal(feastdevv1alpha1.FailedReason))
			Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType))
			Expect(cond.Message).To(ContainSubstring("missing OIDC"))
		})
	})
})

// expectedServerOidcAuthorizConfig returns the authz config the operator is
// expected to render into the SERVER-side feature_store.yaml: OIDC type plus
// the discovery URL and client id from the test secret.
func expectedServerOidcAuthorizConfig() services.AuthzConfig {
	return services.AuthzConfig{
		Type: services.OidcAuthType,
		OidcParameters: map[string]interface{}{
			string(services.OidcAuthDiscoveryUrl): "auth-discovery-url",
			string(services.OidcClientId):         "client-id",
		},
	}
}

// expectedClientOidcAuthorizConfig returns the authz config expected in the
// CLIENT ConfigMap: OIDC type plus client secret and user credentials.
func expectedClientOidcAuthorizConfig() services.AuthzConfig {
	return services.AuthzConfig{
		Type: services.OidcAuthType,
		OidcParameters: map[string]interface{}{
			string(services.OidcClientSecret): "client-secret",
			string(services.OidcUsername):     "username",
			string(services.OidcPassword):     "password"},
	}
}

// validOidcSecretMap returns the complete set of OIDC properties the operator
// requires, keyed by the services.Oidc* property names.
func validOidcSecretMap() map[string]string {
	return map[string]string{
		string(services.OidcClientId):         "client-id",
		string(services.OidcAuthDiscoveryUrl): "auth-discovery-url",
		string(services.OidcClientSecret):     "client-secret",
		string(services.OidcUsername):         "username",
		string(services.OidcPassword):         "password",
	}
}

// createValidOidcSecret builds (but does not create) a Secret in "default"
// carrying every required OIDC property.
func createValidOidcSecret(secretName string) *corev1.Secret {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: "default",
		},
		StringData: validOidcSecretMap(),
	}

	return secret
}

// createInvalidOidcSecret builds a Secret missing the client-id property, so
// reconciliation against it must fail validation.
func createInvalidOidcSecret(secretName string) *corev1.Secret {
	oidcProperties := validOidcSecretMap()
	delete(oidcProperties, string(services.OidcClientId))
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: "default",
		},
		StringData: oidcProperties,
	}

	return secret
}
diff --git a/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go
new file mode 100644
index 00000000000..ec40527ceae
--- /dev/null
+++ b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go
@@ -0,0 +1,653 @@
+/*
+Copyright 2024 Feast Community.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + "path" + + apiresource "k8s.io/apimachinery/pkg/api/resource" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller-Ephemeral services", func() { + Context("When deploying a resource with all ephemeral services", func() { + const resourceName = "services-pvc" + var pullPolicy = corev1.PullAlways + var testEnvVarName = "testEnvVarName" + var testEnvVarValue = "testEnvVarValue" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + onlineStorePath := "online.db" + registryPath := "registry.db" 
+ offlineType := "duckdb" + + offlineStoreMountPath := "/offline" + onlineStoreMountPath := "/online" + registryMountPath := "/registry" + + accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany} + storageClassName := "test" + + onlineStoreMountedPath := path.Join(onlineStoreMountPath, onlineStorePath) + registryMountedPath := path.Join(registryMountPath, registryPath) + + BeforeEach(func() { + createEnvFromSecretAndConfigMap() + + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}, withEnvFrom()) + resource.Spec.Services.UI = nil + resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + Type: offlineType, + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{ + AccessModes: accessModes, + StorageClassName: &storageClassName, + }, + MountPath: offlineStoreMountPath, + }, + }, + } + resource.Spec.Services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: onlineStorePath, + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: onlineStoreMountPath, + }, + }, + } + resource.Spec.Services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: registryPath, + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: registryMountPath, + }, + }, + } + + 
Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + deleteEnvFromSecretAndConfigMap() + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.Type).To(Equal(offlineType)) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.AccessModes).To(Equal(accessModes)) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.StorageClassName).To(Equal(&storageClassName)) + expectedResources := corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: apiresource.MustParse("20Gi"), + }, + } + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources).To(Equal(expectedResources)) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.Path).To(Equal(onlineStorePath)) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.AccessModes).To(Equal(services.DefaultPVCAccessModes)) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.StorageClassName).To(BeNil()) + expectedResources = 
corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: apiresource.MustParse("5Gi"), + }, + } + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.Resources).To(Equal(expectedResources)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Env).To(Equal(&[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(registryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.AccessModes).To(Equal(services.DefaultPVCAccessModes)) + 
Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.StorageClassName).To(BeNil()) + expectedResources = corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: apiresource.MustParse("5Gi"), + }, + } + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.Resources).To(Equal(expectedResources)) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." 
+ resource.Namespace + domain)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + 
Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + ephemeralName := "feast-data" + ephemeralVolume := corev1.Volume{ + Name: ephemeralName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + ephemeralVolMount := corev1.VolumeMount{ + Name: ephemeralName, + MountPath: "/" + ephemeralName, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(3)) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(3)) + Expect(deploy.Spec.Template.Spec.Volumes).NotTo(ContainElement(ephemeralVolume)) + name := feast.GetFeastServiceName(services.RegistryFeastType) + regVol := services.GetRegistryVolume(feast.Handler.FeatureStore, deploy.Spec.Template.Spec.Volumes) + Expect(regVol.Name).To(Equal(name)) + Expect(regVol.PersistentVolumeClaim.ClaimName).To(Equal(name)) + + offlineContainer := services.GetOfflineContainer(*deploy) + Expect(offlineContainer.VolumeMounts).To(HaveLen(3)) + Expect(offlineContainer.VolumeMounts).NotTo(ContainElement(ephemeralVolMount)) + offlineVolMount := services.GetOfflineVolumeMount(feast.Handler.FeatureStore, offlineContainer.VolumeMounts) + Expect(offlineVolMount.MountPath).To(Equal(offlineStoreMountPath)) + offlinePvcName := feast.GetFeastServiceName(services.OfflineFeastType) + Expect(offlineVolMount.Name).To(Equal(offlinePvcName)) + + assertEnvFrom(*offlineContainer) + + // check offline pvc + pvc := 
&corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: offlinePvcName, + Namespace: resource.Namespace, + }, + pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.StorageClassName).To(Equal(&storageClassName)) + Expect(pvc.Spec.AccessModes).To(Equal(accessModes)) + Expect(pvc.Spec.Resources.Requests.Storage().String()).To(Equal(services.DefaultOfflineStorageRequest)) + Expect(pvc.DeletionTimestamp).To(BeNil()) + + // check online + onlinePvcName := feast.GetFeastServiceName(services.OnlineFeastType) + onlineVol := services.GetOnlineVolume(feast.Handler.FeatureStore, deploy.Spec.Template.Spec.Volumes) + Expect(onlineVol.Name).To(Equal(onlinePvcName)) + Expect(onlineVol.PersistentVolumeClaim.ClaimName).To(Equal(onlinePvcName)) + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.VolumeMounts).To(HaveLen(3)) + Expect(onlineContainer.VolumeMounts).NotTo(ContainElement(ephemeralVolMount)) + onlineVolMount := services.GetOnlineVolumeMount(feast.Handler.FeatureStore, onlineContainer.VolumeMounts) + Expect(onlineVolMount.MountPath).To(Equal(onlineStoreMountPath)) + Expect(onlineVolMount.Name).To(Equal(onlinePvcName)) + + assertEnvFrom(*onlineContainer) + + // check online pvc + pvc = &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: onlinePvcName, + Namespace: resource.Namespace, + }, + pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Name).To(Equal(onlinePvcName)) + Expect(pvc.Spec.AccessModes).To(Equal(services.DefaultPVCAccessModes)) + Expect(pvc.Spec.Resources.Requests.Storage().String()).To(Equal(services.DefaultOnlineStorageRequest)) + Expect(pvc.DeletionTimestamp).To(BeNil()) + + // check registry + registryPvcName := feast.GetFeastServiceName(services.RegistryFeastType) + registryVol := services.GetRegistryVolume(feast.Handler.FeatureStore, deploy.Spec.Template.Spec.Volumes) + Expect(registryVol.Name).To(Equal(registryPvcName)) + 
Expect(registryVol.PersistentVolumeClaim.ClaimName).To(Equal(registryPvcName)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.VolumeMounts).To(HaveLen(3)) + Expect(registryContainer.VolumeMounts).NotTo(ContainElement(ephemeralVolMount)) + registryVolMount := services.GetRegistryVolumeMount(feast.Handler.FeatureStore, registryContainer.VolumeMounts) + Expect(registryVolMount.MountPath).To(Equal(registryMountPath)) + Expect(registryVolMount.Name).To(Equal(registryPvcName)) + + // check registry pvc + pvc = &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: registryPvcName, + Namespace: resource.Namespace, + }, + pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Name).To(Equal(registryPvcName)) + Expect(pvc.Spec.AccessModes).To(Equal(services.DefaultPVCAccessModes)) + Expect(pvc.Spec.Resources.Requests.Storage().String()).To(Equal(services.DefaultRegistryStorageRequest)) + Expect(pvc.DeletionTimestamp).To(BeNil()) + + // remove online PVC and reconcile + resourceNew := resource.DeepCopy() + newOnlineStorePath := "/tmp/new_online.db" + resourceNew.Spec.Services.OnlineStore.Persistence.FilePersistence.Path = newOnlineStorePath + resourceNew.Spec.Services.OnlineStore.Persistence.FilePersistence.PvcConfig = nil + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig).To(BeNil()) + + // check online deployment/container + deploy = &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, 
deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(3)) + Expect(deploy.Spec.Template.Spec.Volumes).To(ContainElement(ephemeralVolume)) + Expect(services.GetOnlineContainer(*deploy).VolumeMounts).To(HaveLen(3)) + Expect(services.GetOnlineContainer(*deploy).VolumeMounts).To(ContainElement(ephemeralVolMount)) + Expect(services.GetRegistryContainer(*deploy).VolumeMounts).To(ContainElement(ephemeralVolMount)) + Expect(services.GetOfflineContainer(*deploy).VolumeMounts).To(ContainElement(ephemeralVolMount)) + + // check online pvc is deleted + log.FromContext(feast.Handler.Context).Info("Checking deletion of", "PersistentVolumeClaim", deploy.Name) + pvc = &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: onlinePvcName, + Namespace: resource.Namespace, + }, + pvc) + if err != nil { + Expect(errors.IsNotFound(err)).To(BeTrue()) + } else { + Expect(pvc.DeletionTimestamp).NotTo(BeNil()) + } + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = 
k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(3)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(3)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + + // check registry config + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + Registry: services.RegistryConfig{ + RegistryType: services.RegistryFileConfigType, + Path: registryMountedPath, + }, + OfflineStore: services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDuckDbConfigType, + }, + OnlineStore: services.OnlineStoreConfig{ + Path: onlineStoreMountedPath, + Type: services.OnlineSqliteConfigType, + }, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfig).To(Equal(testConfig)) + + offlineContainer := 
services.GetOfflineContainer(*deploy) + Expect(offlineContainer.Env).To(HaveLen(1)) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + // check offline config + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(testConfig)) + + // check online config + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(3)) + Expect(onlineContainer.ImagePullPolicy).To(Equal(corev1.PullAlways)) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOnline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOnline).To(Equal(testConfig)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + Expect(err).NotTo(HaveOccurred()) + offlineRemote := services.OfflineStoreConfig{ + Host: fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + } + regRemote := services.RegistryConfig{ + 
RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:80", resourceName), + } + clientConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: offlineRemote, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("http://feast-%s-online.default.svc.cluster.local:80", resourceName), + Type: services.OnlineRemoteConfigType, + }, + Registry: regRemote, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfigClient).To(Equal(clientConfig)) + + // change paths and reconcile + resourceNew := resource.DeepCopy() + newOnlineStorePath := "new_online.db" + newRegistryPath := "new_registry.db" + + newOnlineStoreMountedPath := path.Join(onlineStoreMountPath, newOnlineStorePath) + newRegistryMountedPath := path.Join(registryMountPath, newRegistryPath) + + resourceNew.Spec.Services.OnlineStore.Persistence.FilePersistence.Path = newOnlineStorePath + resourceNew.Spec.Services.Registry.Local.Persistence.FilePersistence.Path = newRegistryPath + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check registry config + deploy = &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + registryContainer = services.GetRegistryContainer(*deploy) + env = getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + 
Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + Expect(err).NotTo(HaveOccurred()) + testConfig.OnlineStore.Path = newOnlineStoreMountedPath + testConfig.Registry.Path = newRegistryMountedPath + Expect(repoConfig).To(Equal(testConfig)) + + // check offline config + offlineContainer = services.GetOfflineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(testConfig)) + + // check online config + onlineContainer = services.GetOfflineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + + repoConfigOnline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + testConfig.OnlineStore.Path = newOnlineStoreMountedPath + Expect(repoConfigOnline).To(Equal(testConfig)) + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/featurestore_controller_test.go b/infra/feast-operator/internal/controller/featurestore_controller_test.go index 10b5f64c567..c73fbe1bff3 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_test.go +++ 
b/infra/feast-operator/internal/controller/featurestore_controller_test.go @@ -39,10 +39,15 @@ import ( "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" ) const feastProject = "test_project" +const domain = ".svc.cluster.local:80" +const domainTls = ".svc.cluster.local:443" + +var image = "test:latest" var _ = Describe("FeatureStore Controller", func() { Context("When reconciling a resource", func() { @@ -115,27 +120,53 @@ var _ = Describe("FeatureStore Controller", func() { Expect(cmList.Items).To(HaveLen(1)) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + deployment, _ := feast.GetDeployment() + deployment.Status = appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: "True", // Mark as available + Reason: "MinimumReplicasAvailable", + }, + }, } + + // Update the deployment's status + err = controllerReconciler.Status().Update(context.Background(), &deployment) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + Expect(resource.Status).NotTo(BeNil()) Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) 
Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) Expect(resource.Status.ServiceHostnames.OfflineStore).To(BeEmpty()) - Expect(resource.Status.ServiceHostnames.OnlineStore).To(BeEmpty()) - Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." + resource.Namespace + ".svc.cluster.local:80")) + Expect(resource.Status.ServiceHostnames.Registry).To(BeEmpty()) + Expect(resource.Status.ServiceHostnames.UI).To(BeEmpty()) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + ".svc.cluster.local:80")) Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) Expect(resource.Status.Applied.Services).NotTo(BeNil()) Expect(resource.Status.Applied.Services.OfflineStore).To(BeNil()) - Expect(resource.Status.Applied.Services.OnlineStore).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) - Expect(resource.Status.Applied.Services.OnlineStore).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Remote).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.ImagePullPolicy).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.Resources).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.Registry).To(BeNil()) + Expect(resource.Status.Applied.Services.UI).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry).To(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) Expect(resource.Status.Conditions).NotTo(BeEmpty()) cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) @@ -144,13 +175,15 @@ var _ = Describe("FeatureStore Controller", func() { 
Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) Expect(cond.Message).To(Equal(feastdevv1alpha1.ReadyMessage)) + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) - cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) Expect(cond).ToNot(BeNil()) Expect(cond.Status).To(Equal(metav1.ConditionTrue)) Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) - Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) - Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) Expect(cond).ToNot(BeNil()) @@ -162,25 +195,96 @@ var _ = Describe("FeatureStore Controller", func() { Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.ReadyPhase)) deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Args[0]).To(ContainSubstring("feast init")) Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) svc := &corev1.Service{} err = 
k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), + Name: feast.GetFeastServiceName(services.OnlineFeastType), Namespace: resource.Namespace, }, svc) Expect(err).NotTo(HaveOccurred()) Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) - Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetPort)))) + Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.OnlineFeastType].TargetHttpPort)))) + + // change projectDir to use a git repo + featureRepoPath := "test/dir/feature_repo2" + ref := "xxxxx" + envVars := []corev1.EnvVar{ + { + Name: "test", + Value: "value", + }, + } + resource.Spec.FeastProjectDir = &feastdevv1alpha1.FeastProjectDir{ + Git: &feastdevv1alpha1.GitCloneOptions{ + URL: "test", + Ref: ref, + FeatureRepoPath: featureRepoPath, + Configs: map[string]string{ + "http.sslVerify": "false", + }, + Env: &envVars, + }, + } + err = k8sClient.Update(ctx, resource) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Args[0]).To(ContainSubstring("git -c http.sslVerify=false clone")) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Args[0]).To(ContainSubstring("git checkout " + ref)) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Args[0]).To(ContainSubstring(featureRepoPath)) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Env).To(ContainElements(envVars)) + + online := 
services.GetOnlineContainer(*deploy) + Expect(online.WorkingDir).To(Equal(services.EphemeralPath + "/" + resource.Spec.FeastProject + "/" + featureRepoPath)) + + // change projectDir to use an init template + resource.Spec.FeastProjectDir = &feastdevv1alpha1.FeastProjectDir{ + Init: &feastdevv1alpha1.FeastInitOptions{ + Template: "spark", + }, + } + err = k8sClient.Update(ctx, resource) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(deploy.Spec.Template.Spec.InitContainers[0].Args[0]).To(ContainSubstring("feast init -t spark")) }) It("should properly encode a feature_store.yaml config", func() { @@ -200,25 +304,29 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + Expect(deploy.Spec.Strategy.Type).To(Equal(appsv1.RecreateDeploymentStrategyType)) 
Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) env := getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64(services.RegistryFeastType) + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -227,16 +335,8 @@ var _ = Describe("FeatureStore Controller", func() { repoConfig := &services.RepoConfig{} err = yaml.Unmarshal(envByte, repoConfig) Expect(err).NotTo(HaveOccurred()) - testConfig := &services.RepoConfig{ - Project: feastProject, - Provider: services.LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - Registry: services.RegistryConfig{ - RegistryType: services.RegistryFileConfigType, - Path: services.LocalRegistryPath, - }, - } - Expect(repoConfig).To(Equal(testConfig)) + testConfig := feast.GetDefaultRepoConfig() + Expect(repoConfig).To(Equal(&testConfig)) // check client config cm := &corev1.ConfigMap{} @@ -250,20 +350,21 @@ var _ = Describe("FeatureStore Controller", func() { repoConfigClient := &services.RepoConfig{} err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) Expect(err).NotTo(HaveOccurred()) - clientConfig := &services.RepoConfig{ - Project: feastProject, - Provider: services.LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - Registry: services.RegistryConfig{ - RegistryType: services.RegistryRemoteConfigType, - Path: "feast-test-resource-registry.default.svc.cluster.local:80", - }, + clientConfig := feast.GetInitRepoConfig() + clientConfig.OnlineStore = services.OnlineStoreConfig{ + Type: services.OnlineRemoteConfigType, + Path: "http://feast-test-resource-online.default.svc.cluster.local:80", } - Expect(repoConfigClient).To(Equal(clientConfig)) + 
Expect(repoConfigClient).To(Equal(&clientConfig)) // change feast project and reconcile resourceNew := resource.DeepCopy() resourceNew.Spec.FeastProject = "changed" + resourceNew.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + DeploymentStrategy: &appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + } err = k8sClient.Update(ctx, resourceNew) Expect(err).NotTo(HaveOccurred()) _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ @@ -275,18 +376,19 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) Expect(resource.Spec.FeastProject).To(Equal(resourceNew.Spec.FeastProject)) err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, + Name: objMeta.Name, + Namespace: objMeta.Namespace, }, deploy) Expect(err).NotTo(HaveOccurred()) testConfig.Project = resourceNew.Spec.FeastProject + Expect(deploy.Spec.Strategy.Type).To(Equal(appsv1.RollingUpdateDeploymentStrategyType)) Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) env = getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.RegistryFeastType) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -294,7 +396,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) err = yaml.Unmarshal(envByte, repoConfig) Expect(err).NotTo(HaveOccurred()) - Expect(repoConfig).To(Equal(testConfig)) + Expect(repoConfig).To(Equal(&testConfig)) }) It("should error on reconcile", func() { @@ -314,18 +416,20 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - 
FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) err = controllerutil.RemoveControllerReference(resource, deploy, controllerReconciler.Scheme) @@ -333,7 +437,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(controllerutil.HasControllerReference(deploy)).To(BeFalse()) svc := &corev1.Service{} - name := feast.GetFeastServiceName(services.RegistryFeastType) + name := feast.GetFeastServiceName(services.OnlineFeastType) err = k8sClient.Get(ctx, types.NamespacedName{ Name: name, Namespace: resource.Namespace, @@ -360,14 +464,16 @@ var _ = Describe("FeatureStore Controller", func() { Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) Expect(cond.Status).To(Equal(metav1.ConditionFalse)) Expect(cond.Reason).To(Equal(feastdevv1alpha1.FailedReason)) - Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + name + " is already owned by another Service controller " + name)) + Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + deploy.Name + " is already owned by another Service controller " + name)) - cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) Expect(cond).ToNot(BeNil()) - Expect(cond.Status).To(Equal(metav1.ConditionFalse)) - 
Expect(cond.Reason).To(Equal(feastdevv1alpha1.RegistryFailedReason)) - Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) - Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + name + " is already owned by another Service controller " + name)) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) Expect(cond).ToNot(BeNil()) @@ -382,7 +488,6 @@ var _ = Describe("FeatureStore Controller", func() { Context("When reconciling a resource with all services enabled", func() { const resourceName = "services" - image := "test:latest" var pullPolicy = corev1.PullAlways var testEnvVarName = "testEnvVarName" var testEnvVarValue = "testEnvVarValue" @@ -396,11 +501,13 @@ var _ = Describe("FeatureStore Controller", func() { featurestore := &feastdevv1alpha1.FeatureStore{} BeforeEach(func() { + createEnvFromSecretAndConfigMap() + By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, - {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}) + {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}, withEnvFrom()) Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } }) @@ -411,6 +518,9 @@ var _ = Describe("FeatureStore Controller", func() { By("Cleanup the specific resource instance FeatureStore") Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + // Delete ConfigMap + 
deleteEnvFromSecretAndConfigMap() }) It("should successfully reconcile the resource", func() { @@ -430,43 +540,64 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } Expect(resource.Status).NotTo(BeNil()) Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) Expect(resource.Status.Applied.Services).NotTo(BeNil()) Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) - Expect(resource.Status.Applied.Services.OfflineStore.ImagePullPolicy).To(BeNil()) - Expect(resource.Status.Applied.Services.OfflineStore.Resources).To(BeNil()) - Expect(resource.Status.Applied.Services.OfflineStore.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.Type).To(Equal("dask")) + Expect(resource.Status.Applied.Services.OfflineStore.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.OfflineStore.Server.Image).To(Equal(&services.DefaultImage)) Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) - Expect(resource.Status.Applied.Services.OnlineStore.Env).To(Equal(&[]corev1.EnvVar{{Name: 
testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})) - Expect(resource.Status.Applied.Services.OnlineStore.ImagePullPolicy).To(Equal(&pullPolicy)) - Expect(resource.Status.Applied.Services.OnlineStore.Resources).NotTo(BeNil()) - Expect(resource.Status.Applied.Services.OnlineStore.Image).To(Equal(&image)) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + services.DefaultOnlineStorePath)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Env).To(Equal(&[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})) + Expect(resource.Status.Applied.Services.OnlineStore.Server.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.OnlineStore.Server.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore.Server.Image).To(Equal(&image)) Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) Expect(resource.Status.Applied.Services.Registry.Local).NotTo(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.ImagePullPolicy).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.Resources).To(BeNil()) - Expect(resource.Status.Applied.Services.Registry.Local.Image).To(Equal(&services.DefaultImage)) - - domain := ".svc.cluster.local:80" + Expect(resource.Status.Applied.Services.Registry.Local.Persistence).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.Path).To(Equal(services.EphemeralPath + "/" + services.DefaultRegistryPath)) + Expect(resource.Status.Applied.Services.Registry.Local.Server.ImagePullPolicy).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Resources).To(BeNil()) + Expect(resource.Status.Applied.Services.Registry.Local.Server.Image).To(Equal(&services.DefaultImage)) + Expect(resource.Status.Applied.Services.UI).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.UI.Env).To(Equal(&[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})) + Expect(resource.Status.Applied.Services.UI.EnvFrom).To(Equal(withEnvFrom())) + Expect(resource.Status.Applied.Services.UI.ImagePullPolicy).To(Equal(&pullPolicy)) + Expect(resource.Status.Applied.Services.UI.Resources).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.UI.Image).To(Equal(&image)) Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domain)) Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domain)) Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." + resource.Namespace + domain)) + Expect(resource.Status.ServiceHostnames.UI).To(Equal(feast.GetFeastServiceName(services.UIFeastType) + "." 
+ resource.Namespace + domain)) Expect(resource.Status.Conditions).NotTo(BeEmpty()) cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) Expect(cond).ToNot(BeNil()) - Expect(cond.Status).To(Equal(metav1.ConditionTrue)) - Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) - Expect(cond.Message).To(Equal(feastdevv1alpha1.ReadyMessage)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) Expect(cond).ToNot(BeNil()) @@ -496,19 +627,26 @@ var _ = Describe("FeatureStore Controller", func() { Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) - Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.ReadyPhase)) + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.UIReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.UIReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.UIReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) 
Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) svc := &corev1.Service{} err = k8sClient.Get(ctx, types.NamespacedName{ Name: feast.GetFeastServiceName(services.RegistryFeastType), @@ -517,7 +655,7 @@ var _ = Describe("FeatureStore Controller", func() { svc) Expect(err).NotTo(HaveOccurred()) Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) - Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetPort)))) + Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetHttpPort)))) }) It("should properly encode a feature_store.yaml config", func() { @@ -543,12 +681,17 @@ var _ = Describe("FeatureStore Controller", func() { deployList := appsv1.DeploymentList{} err = k8sClient.List(ctx, &deployList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(deployList.Items).To(HaveLen(3)) + Expect(deployList.Items).To(HaveLen(1)) + + saList := corev1.ServiceAccountList{} + err = k8sClient.List(ctx, &saList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(saList.Items).To(HaveLen(1)) svcList := corev1.ServiceList{} err = k8sClient.List(ctx, &svcList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(svcList.Items).To(HaveLen(3)) + Expect(svcList.Items).To(HaveLen(4)) cmList := corev1.ConfigMapList{} err = k8sClient.List(ctx, &cmList, listOpts) @@ -556,26 +699,30 @@ var _ = Describe("FeatureStore Controller", func() { Expect(cmList.Items).To(HaveLen(1)) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: 
handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } // check registry config deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, - }, - deploy) - Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) - env := getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(registryContainer.Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64(services.RegistryFeastType) + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -584,31 +731,22 @@ var _ = Describe("FeatureStore Controller", func() { repoConfig := &services.RepoConfig{} err = yaml.Unmarshal(envByte, repoConfig) Expect(err).NotTo(HaveOccurred()) - testConfig := &services.RepoConfig{ - Project: feastProject, - Provider: services.LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - Registry: services.RegistryConfig{ - RegistryType: services.RegistryFileConfigType, - Path: services.LocalRegistryPath, - }, + testConfig := feast.GetDefaultRepoConfig() + testConfig.OfflineStore = services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDaskConfigType, } - Expect(repoConfig).To(Equal(testConfig)) + 
Expect(repoConfig).To(Equal(&testConfig)) // check offline config - deploy = &appsv1.Deployment{} - err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.OfflineFeastType), - Namespace: resource.Namespace, - }, - deploy) - Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) - env = getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + offlineContainer := services.GetOfflineContainer(*deploy) + Expect(offlineContainer.Env).To(HaveLen(1)) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.OfflineFeastType) + assertEnvFrom(*offlineContainer) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -617,36 +755,18 @@ var _ = Describe("FeatureStore Controller", func() { repoConfigOffline := &services.RepoConfig{} err = yaml.Unmarshal(envByte, repoConfigOffline) Expect(err).NotTo(HaveOccurred()) - regRemote := services.RegistryConfig{ - RegistryType: services.RegistryRemoteConfigType, - Path: "feast-services-registry.default.svc.cluster.local:80", - } - offlineConfig := &services.RepoConfig{ - Project: feastProject, - Provider: services.LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - OfflineStore: services.OfflineStoreConfig{ - Type: services.OfflineDaskConfigType, - }, - Registry: regRemote, - } - Expect(repoConfigOffline).To(Equal(offlineConfig)) + Expect(repoConfigOffline).To(Equal(&testConfig)) // check online config - deploy = &appsv1.Deployment{} - err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.OnlineFeastType), - Namespace: resource.Namespace, - }, - deploy) - 
Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(3)) - Expect(deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways)) - env = getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(3)) + Expect(onlineContainer.ImagePullPolicy).To(Equal(corev1.PullAlways)) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.OnlineFeastType) + assertEnvFrom(*onlineContainer) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -655,24 +775,7 @@ var _ = Describe("FeatureStore Controller", func() { repoConfigOnline := &services.RepoConfig{} err = yaml.Unmarshal(envByte, repoConfigOnline) Expect(err).NotTo(HaveOccurred()) - offlineRemote := services.OfflineStoreConfig{ - Host: "feast-services-offline.default.svc.cluster.local", - Type: services.OfflineRemoteConfigType, - Port: services.HttpPort, - } - onlineConfig := &services.RepoConfig{ - Project: feastProject, - Provider: services.LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - OfflineStore: offlineRemote, - OnlineStore: services.OnlineStoreConfig{ - Path: services.LocalOnlinePath, - Type: services.OnlineSqliteConfigType, - }, - Registry: regRemote, - } - Expect(repoConfigOnline).To(Equal(onlineConfig)) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(3)) + Expect(repoConfigOnline).To(Equal(&testConfig)) // check client config cm := &corev1.ConfigMap{} @@ -686,6 +789,15 @@ var _ = Describe("FeatureStore Controller", func() { repoConfigClient := &services.RepoConfig{} err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), 
repoConfigClient) Expect(err).NotTo(HaveOccurred()) + offlineRemote := services.OfflineStoreConfig{ + Host: "feast-services-offline.default.svc.cluster.local", + Type: services.OfflineRemoteConfigType, + Port: services.HttpPort, + } + regRemote := services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: "feast-services-registry.default.svc.cluster.local:80", + } clientConfig := &services.RepoConfig{ Project: feastProject, Provider: services.LocalProviderType, @@ -695,7 +807,8 @@ var _ = Describe("FeatureStore Controller", func() { Path: "http://feast-services-online.default.svc.cluster.local:80", Type: services.OnlineRemoteConfigType, }, - Registry: regRemote, + Registry: regRemote, + AuthzConfig: noAuthzConfig(), } Expect(repoConfigClient).To(Equal(clientConfig)) @@ -713,8 +826,8 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) Expect(resource.Spec.FeastProject).To(Equal(resourceNew.Spec.FeastProject)) err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.RegistryFeastType), - Namespace: resource.Namespace, + Name: objMeta.Name, + Namespace: objMeta.Namespace, }, deploy) Expect(err).NotTo(HaveOccurred()) @@ -724,7 +837,7 @@ var _ = Describe("FeatureStore Controller", func() { env = getFeatureStoreYamlEnvVar(deploy.Spec.Template.Spec.Containers[0].Env) Expect(env).NotTo(BeNil()) - fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.RegistryFeastType) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) Expect(fsYamlStr).To(Equal(env.Value)) @@ -732,7 +845,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) err = yaml.Unmarshal(envByte, repoConfig) Expect(err).NotTo(HaveOccurred()) - Expect(repoConfig).To(Equal(testConfig)) + Expect(repoConfig).To(Equal(&testConfig)) }) It("should properly set container env variables", func() { @@ -758,12 +871,12 @@ var _ = 
Describe("FeatureStore Controller", func() { deployList := appsv1.DeploymentList{} err = k8sClient.List(ctx, &deployList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(deployList.Items).To(HaveLen(3)) + Expect(deployList.Items).To(HaveLen(1)) svcList := corev1.ServiceList{} err = k8sClient.List(ctx, &svcList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(svcList.Items).To(HaveLen(3)) + Expect(svcList.Items).To(HaveLen(4)) cmList := corev1.ConfigMapList{} err = k8sClient.List(ctx, &cmList, listOpts) @@ -771,32 +884,36 @@ var _ = Describe("FeatureStore Controller", func() { Expect(cmList.Items).To(HaveLen(1)) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } fsYamlStr := "" - fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.OnlineFeastType) + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() Expect(err).NotTo(HaveOccurred()) // check online config deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.OnlineFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(3)) - Expect(areEnvVarArraysEqual(deploy.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})).To(BeTrue()) - 
Expect(deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways)) + Expect(deploy.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy.Name)) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(3)) + Expect(areEnvVarArraysEqual(onlineContainer.Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: services.TmpFeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})).To(BeTrue()) + Expect(onlineContainer.ImagePullPolicy).To(Equal(corev1.PullAlways)) // change feast project and reconcile resourceNew := resource.DeepCopy() - resourceNew.Spec.Services.OnlineStore.Env = &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}} + resourceNew.Spec.Services.OnlineStore.Server.Env = &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.TmpFeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}} err = k8sClient.Update(ctx, resourceNew) Expect(err).NotTo(HaveOccurred()) _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ @@ -806,16 +923,16 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - Expect(areEnvVarArraysEqual(*resource.Status.Applied.Services.OnlineStore.Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: 
&corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}})).To(BeTrue()) + Expect(areEnvVarArraysEqual(*resource.Status.Applied.Services.OnlineStore.Server.Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.TmpFeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}})).To(BeTrue()) err = k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.OnlineFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) - Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(3)) - Expect(areEnvVarArraysEqual(deploy.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}})).To(BeTrue()) + onlineContainer = services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(3)) + Expect(areEnvVarArraysEqual(onlineContainer.Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.TmpFeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}})).To(BeTrue()) }) It("Should delete k8s objects owned by the FeatureStore CR", func() { @@ -841,15 +958,15 @@ var _ = Describe("FeatureStore Controller", func() { deployList := appsv1.DeploymentList{} err = k8sClient.List(ctx, &deployList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(deployList.Items).To(HaveLen(3)) + Expect(deployList.Items).To(HaveLen(1)) svcList := corev1.ServiceList{} err = 
k8sClient.List(ctx, &svcList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(svcList.Items).To(HaveLen(3)) + Expect(svcList.Items).To(HaveLen(4)) - // disable the Online Store service - resource.Spec.Services.OnlineStore = nil + // disable the UI Store service + resource.Spec.Services.UI = nil err = k8sClient.Update(ctx, resource) Expect(err).NotTo(HaveOccurred()) @@ -860,11 +977,11 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.List(ctx, &deployList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(deployList.Items).To(HaveLen(2)) + Expect(deployList.Items).To(HaveLen(1)) err = k8sClient.List(ctx, &svcList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(svcList.Items).To(HaveLen(2)) + Expect(svcList.Items).To(HaveLen(3)) err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) @@ -885,7 +1002,7 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.List(ctx, &svcList, listOpts) Expect(err).NotTo(HaveOccurred()) - Expect(svcList.Items).To(HaveLen(1)) + Expect(svcList.Items).To(HaveLen(2)) }) It("should handle remote registry references", func() { @@ -913,7 +1030,9 @@ var _ = Describe("FeatureStore Controller", func() { Spec: feastdevv1alpha1.FeatureStoreSpec{ FeastProject: referencedRegistry.Spec.FeastProject, Services: &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{}, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, OfflineStore: &feastdevv1alpha1.OfflineStore{}, Registry: &feastdevv1alpha1.Registry{ Remote: &feastdevv1alpha1.RemoteRegistryConfig{ @@ -935,6 +1054,8 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).To(HaveOccurred()) err = k8sClient.Get(ctx, nsName, resource) Expect(err).NotTo(HaveOccurred()) + Expect(resource.Status.Applied.Services.Registry.Remote.FeastRef.Namespace).NotTo(BeEmpty()) + Expect(apimeta.FindStatusCondition(resource.Status.Conditions, 
feastdevv1alpha1.AuthorizationReadyType)).To(BeNil()) Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType)).To(BeNil()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.ReadyType)).To(BeFalse()) cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) @@ -950,6 +1071,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).To(HaveOccurred()) err = k8sClient.Get(ctx, nsName, resource) Expect(err).NotTo(HaveOccurred()) + Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType)).To(BeNil()) Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType)).To(BeNil()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.ReadyType)).To(BeFalse()) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) @@ -967,20 +1089,40 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.Get(ctx, nsName, resource) Expect(err).NotTo(HaveOccurred()) + Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType)).To(BeNil()) Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType)).To(BeNil()) - Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.ReadyType)).To(BeTrue()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType)).To(BeTrue()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType)).To(BeTrue()) - Expect(resource.Status.Applied.Services.Registry.Remote.FeastRef.Namespace).To(Equal(resource.Namespace)) Expect(resource.Status.ServiceHostnames.Registry).ToNot(BeEmpty()) Expect(resource.Status.ServiceHostnames.Registry).To(Equal(referencedRegistry.Status.ServiceHostnames.Registry)) 
feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(1)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + // check client config cm := &corev1.ConfigMap{} err = k8sClient.Get(ctx, types.NamespacedName{ @@ -995,11 +1137,6 @@ var _ = Describe("FeatureStore Controller", func() { Project: feastProject, Provider: services.LocalProviderType, EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, - OfflineStore: services.OfflineStoreConfig{ - Host: "feast-" + resource.Name + "-offline.default.svc.cluster.local", - Type: services.OfflineRemoteConfigType, - Port: services.HttpPort, - }, OnlineStore: services.OnlineStoreConfig{ Path: "http://feast-" + resource.Name + "-online.default.svc.cluster.local:80", Type: services.OnlineRemoteConfigType, @@ -1008,9 +1145,30 @@ var _ = Describe("FeatureStore Controller", func() { RegistryType: services.RegistryRemoteConfigType, Path: "feast-" + referencedRegistry.Name + "-registry.default.svc.cluster.local:80", }, + AuthzConfig: noAuthzConfig(), } 
Expect(repoConfigClient).To(Equal(clientConfig)) + // disable init containers + resource.Spec.Services.DisableInitContainers = true + err = k8sClient.Update(ctx, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: nsName, + }) + Expect(err).NotTo(HaveOccurred()) + + deploy = &appsv1.Deployment{} + objMeta = feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.InitContainers).To(BeEmpty()) + + // break remote reference hostname := "test:80" referencedRegistry.Spec.Services.Registry = &feastdevv1alpha1.Registry{ Remote: &feastdevv1alpha1.RemoteRegistryConfig{ @@ -1032,6 +1190,7 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.Get(ctx, nsName, resource) Expect(err).NotTo(HaveOccurred()) Expect(resource.Status.ServiceHostnames.Registry).To(BeEmpty()) + Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType)).To(BeNil()) Expect(apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType)).To(BeNil()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.ReadyType)).To(BeFalse()) Expect(apimeta.IsStatusConditionTrue(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType)).To(BeTrue()) @@ -1059,18 +1218,21 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) feast := services.FeastServices{ - Client: controllerReconciler.Client, - Context: ctx, - Scheme: controllerReconciler.Scheme, - FeatureStore: resource, + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, } + // check deployment deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() err = 
k8sClient.Get(ctx, types.NamespacedName{ - Name: feast.GetFeastServiceName(services.OfflineFeastType), - Namespace: resource.Namespace, - }, - deploy) + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) Expect(err).NotTo(HaveOccurred()) err = controllerutil.RemoveControllerReference(resource, deploy, controllerReconciler.Scheme) @@ -1098,14 +1260,17 @@ var _ = Describe("FeatureStore Controller", func() { err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - Expect(resource.Status.Conditions).To(HaveLen(5)) + Expect(resource.Status.Conditions).To(HaveLen(6)) cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) Expect(cond).ToNot(BeNil()) Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) Expect(cond.Status).To(Equal(metav1.ConditionFalse)) Expect(cond.Reason).To(Equal(feastdevv1alpha1.FailedReason)) - Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + name + " is already owned by another Service controller " + name)) + Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + deploy.Name + " is already owned by another Service controller " + name)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.AuthorizationReadyType) + Expect(cond).To(BeNil()) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) Expect(cond).ToNot(BeNil()) @@ -1123,10 +1288,9 @@ var _ = Describe("FeatureStore Controller", func() { cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OfflineStoreReadyType) Expect(cond).ToNot(BeNil()) - Expect(cond.Status).To(Equal(metav1.ConditionFalse)) - Expect(cond.Reason).To(Equal(feastdevv1alpha1.OfflineStoreFailedReason)) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) - 
Expect(cond.Message).To(Equal("Error: Object " + resource.Namespace + "/" + name + " is already owned by another Service controller " + name)) cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) Expect(cond).ToNot(BeNil()) @@ -1145,8 +1309,6 @@ var _ = Describe("FeatureStore Controller", func() { err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - Expect(resource.Spec.Services.Registry).To(BeNil()) - resource.Spec.Services.Registry = &feastdevv1alpha1.Registry{} err = k8sClient.Update(ctx, resource) Expect(err).To(HaveOccurred()) @@ -1189,42 +1351,21 @@ var _ = Describe("FeatureStore Controller", func() { }) }) -func createFeatureStoreResource(resourceName string, image string, pullPolicy corev1.PullPolicy, envVars *[]corev1.EnvVar) *feastdevv1alpha1.FeatureStore { - return &feastdevv1alpha1.FeatureStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - Spec: feastdevv1alpha1.FeatureStoreSpec{ - FeastProject: feastProject, - Services: &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{}, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - ServiceConfigs: feastdevv1alpha1.ServiceConfigs{ - DefaultConfigs: feastdevv1alpha1.DefaultConfigs{ - Image: &image, - }, - OptionalConfigs: feastdevv1alpha1.OptionalConfigs{ - Env: envVars, - ImagePullPolicy: &pullPolicy, - Resources: &corev1.ResourceRequirements{}, - }, - }, - }, - }, - }, - } -} - func getFeatureStoreYamlEnvVar(envs []corev1.EnvVar) *corev1.EnvVar { for _, e := range envs { - if e.Name == services.FeatureStoreYamlEnvVar { + if e.Name == services.TmpFeatureStoreYamlEnvVar { return &e } } return nil } +func noAuthzConfig() services.AuthzConfig { + return services.AuthzConfig{ + Type: services.NoAuthAuthType, + } +} + func areEnvVarArraysEqual(arr1 []corev1.EnvVar, arr2 []corev1.EnvVar) bool { if len(arr1) != len(arr2) { return false diff --git 
a/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go b/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go new file mode 100644 index 00000000000..dcf684c7733 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go @@ -0,0 +1,169 @@ +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +func assertEnvFrom(container corev1.Container) { + envFrom := container.EnvFrom + Expect(envFrom).NotTo(BeNil()) + checkEnvFromCounter := 0 + + for _, source := range envFrom { + if source.ConfigMapRef != nil && source.ConfigMapRef.Name == "example-configmap" { + checkEnvFromCounter += 1 + // Simulate retrieval of ConfigMap data and validate + configMap := &corev1.ConfigMap{} + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Name: source.ConfigMapRef.Name, + Namespace: "default", + }, configMap) + Expect(err).NotTo(HaveOccurred()) + // Validate a specific key-value pair from the ConfigMap + Expect(configMap.Data["example-key"]).To(Equal("example-value")) + } + + if source.SecretRef != nil && source.SecretRef.Name == "example-secret" { + checkEnvFromCounter += 1 + // Simulate retrieval of Secret data and validate + secret := &corev1.Secret{} + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Name: source.SecretRef.Name, + Namespace: "default", + }, secret) + Expect(err).NotTo(HaveOccurred()) + // Validate a specific key-value pair from the Secret + Expect(string(secret.Data["secret-key"])).To(Equal("secret-value")) + } + } + Expect(checkEnvFromCounter).To(Equal(2)) +} + +func createEnvFromSecretAndConfigMap() { + By("creating the config map and secret for envFrom") + envFromConfigMap := 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-configmap", + Namespace: "default", + }, + Data: map[string]string{"example-key": "example-value"}, + } + err := k8sClient.Create(context.TODO(), envFromConfigMap) + Expect(err).ToNot(HaveOccurred()) + + envFromSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-secret", + Namespace: "default", + }, + StringData: map[string]string{"secret-key": "secret-value"}, + } + err = k8sClient.Create(context.TODO(), envFromSecret) + Expect(err).ToNot(HaveOccurred()) +} + +func deleteEnvFromSecretAndConfigMap() { + // Delete ConfigMap + By("Deleting the configmap and secret for envFrom") + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-configmap", + Namespace: "default", + }, + } + err := k8sClient.Delete(context.TODO(), configMap) + Expect(err).ToNot(HaveOccurred()) + + // Delete Secret + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-secret", + Namespace: "default", + }, + } + err = k8sClient.Delete(context.TODO(), secret) + Expect(err).ToNot(HaveOccurred()) +} + +func createFeatureStoreResource(resourceName string, image string, pullPolicy corev1.PullPolicy, envVars *[]corev1.EnvVar, envFromVar *[]corev1.EnvFromSource) *feastdevv1alpha1.FeatureStore { + return &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: feastProject, + Services: &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ + OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + EnvFrom: envFromVar, + }, + }, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ + DefaultCtrConfigs: 
feastdevv1alpha1.DefaultCtrConfigs{ + Image: &image, + }, + OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + Env: envVars, + EnvFrom: envFromVar, + ImagePullPolicy: &pullPolicy, + Resources: &corev1.ResourceRequirements{}, + }, + }, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{ + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + Image: &image, + }, + OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + Env: envVars, + EnvFrom: envFromVar, + ImagePullPolicy: &pullPolicy, + Resources: &corev1.ResourceRequirements{}, + }, + }, + }, + }, + }, + } +} + +func withEnvFrom() *[]corev1.EnvFromSource { + + return &[]corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "example-configmap"}, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "example-secret"}, + }, + }, + } + +} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go new file mode 100644 index 00000000000..883cfe940aa --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go @@ -0,0 +1,443 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/base64" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +var _ = Describe("FeatureStore Controller - Feast service TLS", func() { + Context("When reconciling a FeatureStore resource", func() { + const resourceName = "test-tls" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + localRef := corev1.LocalObjectReference{Name: "test"} + tlsConfigs := &feastdevv1alpha1.TlsConfigs{ + SecretRef: &localRef, + } + BeforeEach(func() { + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: feastProject, + Services: 
&feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: tlsConfigs, + }, + }, + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: tlsConfigs, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: tlsConfigs, + }, + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{ + TLS: tlsConfigs, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + Expect(resource.Status).NotTo(BeNil()) + Expect(resource.Status.FeastVersion).To(Equal(feastversion.FeastVersion)) + Expect(resource.Status.ClientConfigMap).To(Equal(feast.GetFeastServiceName(services.ClientFeastType))) + Expect(resource.Status.Applied.FeastProject).To(Equal(resource.Spec.FeastProject)) + Expect(resource.Status.Applied.Services).NotTo(BeNil()) + 
Expect(resource.Status.Applied.Services.OfflineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.OnlineStore).NotTo(BeNil()) + Expect(resource.Status.Applied.Services.Registry).NotTo(BeNil()) + + Expect(resource.Status.ServiceHostnames.OfflineStore).To(Equal(feast.GetFeastServiceName(services.OfflineFeastType) + "." + resource.Namespace + domainTls)) + Expect(resource.Status.ServiceHostnames.OnlineStore).To(Equal(feast.GetFeastServiceName(services.OnlineFeastType) + "." + resource.Namespace + domainTls)) + Expect(resource.Status.ServiceHostnames.Registry).To(Equal(feast.GetFeastServiceName(services.RegistryFeastType) + "." + resource.Namespace + domainTls)) + + Expect(resource.Status.Conditions).NotTo(BeEmpty()) + cond := apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.DeploymentNotAvailableReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.DeploymentNotAvailableMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.RegistryReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.RegistryReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.RegistryReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.ClientReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.ClientReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.ClientReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, 
feastdevv1alpha1.OfflineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OfflineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OfflineStoreReadyMessage)) + + cond = apimeta.FindStatusCondition(resource.Status.Conditions, feastdevv1alpha1.OnlineStoreReadyType) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(feastdevv1alpha1.ReadyReason)) + Expect(cond.Type).To(Equal(feastdevv1alpha1.OnlineStoreReadyType)) + Expect(cond.Message).To(Equal(feastdevv1alpha1.OnlineStoreReadyMessage)) + + Expect(resource.Status.Phase).To(Equal(feastdevv1alpha1.PendingPhase)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Replicas).To(Equal(&services.DefaultReplicas)) + Expect(controllerutil.HasControllerReference(deploy)).To(BeTrue()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + svc := &corev1.Service{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.RegistryFeastType), + Namespace: resource.Namespace, + }, + svc) + Expect(err).NotTo(HaveOccurred()) + Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) + Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetHttpsPort)))) + }) + + It("should properly encode a feature_store.yaml config", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: 
typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + req, err := labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(1)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(4)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + // check deployment + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + registryContainer := services.GetRegistryContainer(*deploy) + Expect(registryContainer.Env).To(HaveLen(1)) + env := getFeatureStoreYamlEnvVar(registryContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err := feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err := base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfig := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfig) + 
Expect(err).NotTo(HaveOccurred()) + testConfig := feast.GetDefaultRepoConfig() + testConfig.OfflineStore = services.OfflineStoreConfig{ + Type: services.OfflineFilePersistenceDaskConfigType, + } + Expect(repoConfig).To(Equal(&testConfig)) + + // check offline config + offlineContainer := services.GetOfflineContainer(*deploy) + Expect(offlineContainer.Env).To(HaveLen(1)) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOffline).To(Equal(&testConfig)) + + // check online config + onlineContainer := services.GetOnlineContainer(*deploy) + Expect(onlineContainer.Env).To(HaveLen(1)) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOnline := &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfigOnline).To(Equal(&testConfig)) + Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) + + // check client config + cm := &corev1.ConfigMap{} + name := feast.GetFeastServiceName(services.ClientFeastType) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: name, + Namespace: resource.Namespace, + }, + cm) + Expect(err).NotTo(HaveOccurred()) + repoConfigClient := &services.RepoConfig{} + err = yaml.Unmarshal([]byte(cm.Data[services.FeatureStoreYamlCmKey]), repoConfigClient) + 
Expect(err).NotTo(HaveOccurred()) + offlineRemote := services.OfflineStoreConfig{ + Host: fmt.Sprintf("feast-%s-offline.default.svc.cluster.local", resourceName), + Type: services.OfflineRemoteConfigType, + Port: services.HttpsPort, + Scheme: services.HttpsScheme, + Cert: services.GetTlsPath(services.OfflineFeastType) + "tls.crt", + } + regRemote := services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: fmt.Sprintf("feast-%s-registry.default.svc.cluster.local:443", resourceName), + Cert: services.GetTlsPath(services.RegistryFeastType) + "tls.crt", + } + clientConfig := &services.RepoConfig{ + Project: feastProject, + Provider: services.LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + OfflineStore: offlineRemote, + OnlineStore: services.OnlineStoreConfig{ + Path: fmt.Sprintf("https://feast-%s-online.default.svc.cluster.local:443", resourceName), + Type: services.OnlineRemoteConfigType, + Cert: services.GetTlsPath(services.OnlineFeastType) + "tls.crt", + }, + Registry: regRemote, + AuthzConfig: noAuthzConfig(), + } + Expect(repoConfigClient).To(Equal(clientConfig)) + + // change tls and reconcile + resourceNew := resource.DeepCopy() + disable := true + remoteRegHost := "test.other-ns:443" + resourceNew.Spec = feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: feastProject, + Services: &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{ + Disable: &disable, + }, + }, + }, + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: tlsConfigs, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Remote: &feastdevv1alpha1.RemoteRegistryConfig{ + Hostname: &remoteRegHost, + TLS: &feastdevv1alpha1.TlsRemoteRegistryConfigs{ + ConfigMapRef: localRef, + CertName: "remote.crt", + }, + }, + }, + }, + } + err = k8sClient.Update(ctx, resourceNew) + 
Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + feast.Handler.FeatureStore = resource + + // check registry + deploy = &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(2)) + + // check offline config + offlineContainer = services.GetOfflineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(offlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + repoConfigOffline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOffline) + Expect(err).NotTo(HaveOccurred()) + regRemote = services.RegistryConfig{ + RegistryType: services.RegistryRemoteConfigType, + Path: remoteRegHost, + Cert: services.GetTlsPath(services.RegistryFeastType) + "remote.crt", + } + testConfig.Registry = regRemote + Expect(repoConfigOffline).To(Equal(&testConfig)) + + // check online config + onlineContainer = services.GetOnlineContainer(*deploy) + env = getFeatureStoreYamlEnvVar(onlineContainer.Env) + Expect(env).NotTo(BeNil()) + + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64() + Expect(err).NotTo(HaveOccurred()) + Expect(fsYamlStr).To(Equal(env.Value)) + + envByte, err = base64.StdEncoding.DecodeString(env.Value) + Expect(err).NotTo(HaveOccurred()) + + repoConfigOnline = &services.RepoConfig{} + err = yaml.Unmarshal(envByte, repoConfigOnline) + Expect(err).NotTo(HaveOccurred()) + 
testConfig.Registry = regRemote + Expect(repoConfigOnline).To(Equal(&testConfig)) + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go b/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go new file mode 100644 index 00000000000..521f18cdc36 --- /dev/null +++ b/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go @@ -0,0 +1,215 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("FeatureStore Controller - Deployment Volumes and VolumeMounts", func() { + Context("When deploying featureStore Spec we should have an option do Volumes and VolumeMounts", func() { + const resourceName = "services-ephemeral" + const offlineType = "duckdb" + var pullPolicy = corev1.PullAlways + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + featurestore := &feastdevv1alpha1.FeatureStore{} + onlineStorePath := "/data/online.db" + registryPath := "/data/registry.db" + + BeforeEach(func() { + By("creating the custom resource for the Kind FeatureStore") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + if err != nil && errors.IsNotFound(err) { + resource := createFeatureStoreVolumeResource(resourceName, image, pullPolicy) + resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + Type: offlineType, + }, + } + resource.Spec.Services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: onlineStorePath, + }, + } + resource.Spec.Services.Registry = &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: registryPath, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + AfterEach(func() { + resource := &feastdevv1alpha1.FeatureStore{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance FeatureStore") + Expect(k8sClient.Delete(ctx, 
resource)).To(Succeed()) + + }) + + It("should successfully reconcile the resource and volumes and volumeMounts should be available", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + deploy := &appsv1.Deployment{} + objMeta := feast.GetObjectMeta() + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: objMeta.Name, + Namespace: objMeta.Namespace, + }, deploy) + + Expect(err).NotTo(HaveOccurred()) + + // Extract the PodSpec from DeploymentSpec + podSpec := deploy.Spec.Template.Spec + + // Validate Volumes + // Validate Volumes - Check if our test volume exists among multiple + Expect(podSpec.Volumes).To(ContainElement(WithTransform(func(v corev1.Volume) string { + return v.Name + }, Equal("test-volume"))), "Expected volume 'test-volume' to be present") + + // Ensure 'online' container has the test volume mount + var onlineContainer *corev1.Container + for i, container := range podSpec.Containers { + if container.Name == "online" { + onlineContainer = &podSpec.Containers[i] + break + } + } + Expect(onlineContainer).ToNot(BeNil(), "Expected to find container 'online'") + + // Validate that 'online' container has the test-volume mount + Expect(onlineContainer.VolumeMounts).To(ContainElement(WithTransform(func(vm corev1.VolumeMount) string { + return vm.Name + }, Equal("test-volume"))), "Expected 'online' container to have volume mount 'test-volume'") + + // Ensure all other 
containers do NOT have the test volume mount + for _, container := range podSpec.Containers { + if container.Name != "online" { + Expect(container.VolumeMounts).ToNot(ContainElement(WithTransform(func(vm corev1.VolumeMount) string { + return vm.Name + }, Equal("test-volume"))), "Unexpected volume mount 'test-volume' found in container "+container.Name) + } + } + + }) + }) +}) + +func createFeatureStoreVolumeResource(resourceName string, image string, pullPolicy corev1.PullPolicy) *feastdevv1alpha1.FeatureStore { + volume := corev1.Volume{ + Name: "test-volume", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + volumeMount := corev1.VolumeMount{ + Name: "test-volume", + MountPath: "/data", + } + + return &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: feastProject, + Services: &feastdevv1alpha1.FeatureStoreServices{ + Volumes: []corev1.Volume{volume}, + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{}, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + VolumeMounts: []corev1.VolumeMount{volumeMount}, + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + Image: &image, + }, + OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + ImagePullPolicy: &pullPolicy, + Resources: &corev1.ResourceRequirements{}, + }, + }, + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{ + ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + Image: &image, + }, + OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + ImagePullPolicy: &pullPolicy, + Resources: &corev1.ResourceRequirements{}, + }, + }, + }, + }, + }, + } +} diff --git 
a/infra/feast-operator/internal/controller/handler/handler.go b/infra/feast-operator/internal/controller/handler/handler.go new file mode 100644 index 00000000000..73bacffea47 --- /dev/null +++ b/infra/feast-operator/internal/controller/handler/handler.go @@ -0,0 +1,28 @@ +package handler + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// delete an object if the FeatureStore is set as the object's controller/owner +func (handler *FeastHandler) DeleteOwnedFeastObj(obj client.Object) error { + name := obj.GetName() + kind := obj.GetObjectKind().GroupVersionKind().Kind + if err := handler.Client.Get(handler.Context, client.ObjectKeyFromObject(obj), obj); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + for _, ref := range obj.GetOwnerReferences() { + if *ref.Controller && ref.UID == handler.FeatureStore.UID { + if err := handler.Client.Delete(handler.Context, obj); err != nil { + return err + } + log.FromContext(handler.Context).Info("Successfully deleted", kind, name) + } + } + return nil +} diff --git a/infra/feast-operator/internal/controller/handler/handler_types.go b/infra/feast-operator/internal/controller/handler/handler_types.go new file mode 100644 index 00000000000..5a26776f569 --- /dev/null +++ b/infra/feast-operator/internal/controller/handler/handler_types.go @@ -0,0 +1,20 @@ +package handler + +import ( + "context" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + FeastPrefix = "feast-" +) + +type FeastHandler struct { + client.Client + Context context.Context + Scheme *runtime.Scheme + FeatureStore *feastdevv1alpha1.FeatureStore +} diff --git a/infra/feast-operator/internal/controller/services/client.go b/infra/feast-operator/internal/controller/services/client.go index 
1befd2df194..89e22f7be6d 100644 --- a/infra/feast-operator/internal/controller/services/client.go +++ b/infra/feast-operator/internal/controller/services/client.go @@ -30,12 +30,12 @@ func (feast *FeastServices) deployClient() error { } func (feast *FeastServices) createClientConfigMap() error { - logger := log.FromContext(feast.Context) + logger := log.FromContext(feast.Handler.Context) cm := &corev1.ConfigMap{ - ObjectMeta: feast.GetObjectMeta(ClientFeastType), + ObjectMeta: feast.GetObjectMetaType(ClientFeastType), } cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - if op, err := controllerutil.CreateOrUpdate(feast.Context, feast.Client, cm, controllerutil.MutateFn(func() error { + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, cm, controllerutil.MutateFn(func() error { return feast.setClientConfigMap(cm) })); err != nil { return err @@ -46,12 +46,43 @@ func (feast *FeastServices) createClientConfigMap() error { } func (feast *FeastServices) setClientConfigMap(cm *corev1.ConfigMap) error { - cm.Labels = feast.getLabels(ClientFeastType) - clientYaml, err := feast.getClientFeatureStoreYaml() + cm.Labels = feast.getFeastTypeLabels(ClientFeastType) + clientYaml, err := feast.getClientFeatureStoreYaml(feast.extractConfigFromSecret) if err != nil { return err } cm.Data = map[string]string{FeatureStoreYamlCmKey: string(clientYaml)} - feast.FeatureStore.Status.ClientConfigMap = cm.Name - return controllerutil.SetControllerReference(feast.FeatureStore, cm, feast.Scheme) + feast.Handler.FeatureStore.Status.ClientConfigMap = cm.Name + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, cm, feast.Handler.Scheme) +} + +func (feast *FeastServices) createCaConfigMap() error { + logger := log.FromContext(feast.Handler.Context) + cm := feast.initCaConfigMap() + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, cm, controllerutil.MutateFn(func() error { + 
return feast.setCaConfigMap(cm) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "ConfigMap", cm.Name, "operation", op) + } + return nil +} + +func (feast *FeastServices) setCaConfigMap(cm *corev1.ConfigMap) error { + cm.Labels = map[string]string{ + NameLabelKey: feast.Handler.FeatureStore.Name, + } + cm.Annotations = map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + } + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, cm, feast.Handler.Scheme) +} + +func (feast *FeastServices) initCaConfigMap() *corev1.ConfigMap { + cm := &corev1.ConfigMap{ + ObjectMeta: feast.GetObjectMetaType(ClientCaFeastType), + } + cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + return cm } diff --git a/infra/feast-operator/internal/controller/services/repo_config.go b/infra/feast-operator/internal/controller/services/repo_config.go index 3137417f3ac..50ad3b92858 100644 --- a/infra/feast-operator/internal/controller/services/repo_config.go +++ b/infra/feast-operator/internal/controller/services/repo_config.go @@ -18,69 +18,220 @@ package services import ( "encoding/base64" + "fmt" + "path" "strings" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" "gopkg.in/yaml.v3" - corev1 "k8s.io/api/core/v1" ) // GetServiceFeatureStoreYamlBase64 returns a base64 encoded feature_store.yaml config for the feast service -func (feast *FeastServices) GetServiceFeatureStoreYamlBase64(feastType FeastServiceType) (string, error) { - fsYaml, err := feast.getServiceFeatureStoreYaml(feastType) +func (feast *FeastServices) GetServiceFeatureStoreYamlBase64() (string, error) { + fsYaml, err := feast.getServiceFeatureStoreYaml() if err != nil { return "", err } return base64.StdEncoding.EncodeToString(fsYaml), nil } -func (feast *FeastServices) getServiceFeatureStoreYaml(feastType 
FeastServiceType) ([]byte, error) { - return yaml.Marshal(feast.getServiceRepoConfig(feastType)) +func (feast *FeastServices) getServiceFeatureStoreYaml() ([]byte, error) { + repoConfig, err := feast.getServiceRepoConfig() + if err != nil { + return nil, err + } + return yaml.Marshal(repoConfig) +} + +func (feast *FeastServices) getServiceRepoConfig() (RepoConfig, error) { + return getServiceRepoConfig(feast.Handler.FeatureStore, feast.extractConfigFromSecret) } -func (feast *FeastServices) getServiceRepoConfig(feastType FeastServiceType) RepoConfig { - appliedSpec := feast.FeatureStore.Status.Applied +func getServiceRepoConfig( + featureStore *feastdevv1alpha1.FeatureStore, + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + repoConfig, err := getBaseServiceRepoConfig(featureStore, secretExtractionFunc) + if err != nil { + return repoConfig, err + } - repoConfig := feast.getClientRepoConfig() + appliedSpec := featureStore.Status.Applied if appliedSpec.Services != nil { - // Offline server has an `offline_store` section and a remote `registry` - if feastType == OfflineFeastType && appliedSpec.Services.OfflineStore != nil { - repoConfig.OfflineStore = OfflineStoreConfig{ - Type: OfflineDaskConfigType, + services := appliedSpec.Services + if services.OfflineStore != nil { + err := setRepoConfigOffline(services, secretExtractionFunc, &repoConfig) + if err != nil { + return repoConfig, err } - repoConfig.OnlineStore = OnlineStoreConfig{} } - // Online server has an `online_store` section, a remote `registry` and a remote `offline_store` - if feastType == OnlineFeastType && appliedSpec.Services.OnlineStore != nil { - repoConfig.OnlineStore = OnlineStoreConfig{ - Type: OnlineSqliteConfigType, - Path: LocalOnlinePath, + if services.OnlineStore != nil { + err := setRepoConfigOnline(services, secretExtractionFunc, &repoConfig) + if err != nil { + return repoConfig, err } } - // 
Registry server only has a `registry` section - if feastType == RegistryFeastType && feast.isLocalRegistry() { - repoConfig.Registry = RegistryConfig{ - RegistryType: RegistryFileConfigType, - Path: LocalRegistryPath, + if IsLocalRegistry(featureStore) { + err := setRepoConfigRegistry(services, secretExtractionFunc, &repoConfig) + if err != nil { + return repoConfig, err } - repoConfig.OfflineStore = OfflineStoreConfig{} - repoConfig.OnlineStore = OnlineStoreConfig{} } } - return repoConfig + return repoConfig, nil } -func (feast *FeastServices) getClientFeatureStoreYaml() ([]byte, error) { - return yaml.Marshal(feast.getClientRepoConfig()) +func getBaseServiceRepoConfig( + featureStore *feastdevv1alpha1.FeatureStore, + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + + repoConfig := defaultRepoConfig(featureStore) + clientRepoConfig, err := getClientRepoConfig(featureStore, secretExtractionFunc) + if err != nil { + return repoConfig, err + } + if isRemoteRegistry(featureStore) { + repoConfig.Registry = clientRepoConfig.Registry + } + repoConfig.AuthzConfig = clientRepoConfig.AuthzConfig + + appliedSpec := featureStore.Status.Applied + if appliedSpec.AuthzConfig != nil && appliedSpec.AuthzConfig.OidcAuthz != nil { + propertiesMap, authSecretErr := secretExtractionFunc("", appliedSpec.AuthzConfig.OidcAuthz.SecretRef.Name, "") + if authSecretErr != nil { + return repoConfig, authSecretErr + } + + oidcServerProperties := map[string]interface{}{} + for _, oidcServerProperty := range OidcServerProperties { + if val, exists := propertiesMap[string(oidcServerProperty)]; exists { + oidcServerProperties[string(oidcServerProperty)] = val + } else { + return repoConfig, missingOidcSecretProperty(oidcServerProperty) + } + } + repoConfig.AuthzConfig.OidcParameters = oidcServerProperties + } + + return repoConfig, nil } -func (feast *FeastServices) getClientRepoConfig() RepoConfig { - 
status := feast.FeatureStore.Status - clientRepoConfig := RepoConfig{ - Project: status.Applied.FeastProject, - Provider: LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, +func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { + registryPersistence := services.Registry.Local.Persistence + + if registryPersistence != nil { + filePersistence := registryPersistence.FilePersistence + dbPersistence := registryPersistence.DBPersistence + + if filePersistence != nil { + repoConfig.Registry.RegistryType = RegistryFileConfigType + repoConfig.Registry.Path = getActualPath(filePersistence.Path, filePersistence.PvcConfig) + repoConfig.Registry.S3AdditionalKwargs = filePersistence.S3AdditionalKwargs + } else if dbPersistence != nil && len(dbPersistence.Type) > 0 { + repoConfig.Registry.Path = "" + repoConfig.Registry.RegistryType = RegistryConfigType(dbPersistence.Type) + secretKeyName := dbPersistence.SecretKeyName + if len(secretKeyName) == 0 { + secretKeyName = string(repoConfig.Registry.RegistryType) + } + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) + if err != nil { + return err + } + + err = mergeStructWithDBParametersMap(¶metersMap, &repoConfig.Registry) + if err != nil { + return err + } + + repoConfig.Registry.DBParameters = parametersMap + } + } + return nil +} + +func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { + onlineStorePersistence := services.OnlineStore.Persistence + + if onlineStorePersistence != nil { + filePersistence := onlineStorePersistence.FilePersistence + dbPersistence := onlineStorePersistence.DBPersistence + 
+ if filePersistence != nil { + repoConfig.OnlineStore.Path = getActualPath(filePersistence.Path, filePersistence.PvcConfig) + } else if dbPersistence != nil && len(dbPersistence.Type) > 0 { + repoConfig.OnlineStore.Path = "" + repoConfig.OnlineStore.Type = OnlineConfigType(dbPersistence.Type) + secretKeyName := dbPersistence.SecretKeyName + if len(secretKeyName) == 0 { + secretKeyName = string(repoConfig.OnlineStore.Type) + } + + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) + if err != nil { + return err + } + + err = mergeStructWithDBParametersMap(¶metersMap, &repoConfig.OnlineStore) + if err != nil { + return err + } + + repoConfig.OnlineStore.DBParameters = parametersMap + } + } + return nil +} + +func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { + repoConfig.OfflineStore = defaultOfflineStoreConfig + offlineStorePersistence := services.OfflineStore.Persistence + + if offlineStorePersistence != nil { + dbPersistence := offlineStorePersistence.DBPersistence + filePersistence := offlineStorePersistence.FilePersistence + + if filePersistence != nil && len(filePersistence.Type) > 0 { + repoConfig.OfflineStore.Type = OfflineConfigType(filePersistence.Type) + } else if offlineStorePersistence.DBPersistence != nil && len(dbPersistence.Type) > 0 { + repoConfig.OfflineStore.Type = OfflineConfigType(dbPersistence.Type) + secretKeyName := dbPersistence.SecretKeyName + if len(secretKeyName) == 0 { + secretKeyName = string(repoConfig.OfflineStore.Type) + } + + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) + if err != nil { + return err + } + + err = mergeStructWithDBParametersMap(¶metersMap, &repoConfig.OfflineStore) + if err != nil { + return err + } + + 
repoConfig.OfflineStore.DBParameters = parametersMap + } + } + return nil +} + +func (feast *FeastServices) getClientFeatureStoreYaml(secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) ([]byte, error) { + clientRepo, err := getClientRepoConfig(feast.Handler.FeatureStore, secretExtractionFunc) + if err != nil { + return []byte{}, err + } + return yaml.Marshal(clientRepo) +} + +func getClientRepoConfig( + featureStore *feastdevv1alpha1.FeatureStore, + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + status := featureStore.Status + appliedServices := status.Applied.Services + clientRepoConfig, err := getRepoConfig(featureStore, secretExtractionFunc) + if err != nil { + return clientRepoConfig, err } if len(status.ServiceHostnames.OfflineStore) > 0 { clientRepoConfig.OfflineStore = OfflineStoreConfig{ @@ -88,11 +239,23 @@ func (feast *FeastServices) getClientRepoConfig() RepoConfig { Host: strings.Split(status.ServiceHostnames.OfflineStore, ":")[0], Port: HttpPort, } + if appliedServices.OfflineStore != nil && + appliedServices.OfflineStore.Server != nil && appliedServices.OfflineStore.Server.TLS.IsTLS() { + clientRepoConfig.OfflineStore.Cert = GetTlsPath(OfflineFeastType) + appliedServices.OfflineStore.Server.TLS.SecretKeyNames.TlsCrt + clientRepoConfig.OfflineStore.Port = HttpsPort + clientRepoConfig.OfflineStore.Scheme = HttpsScheme + } } if len(status.ServiceHostnames.OnlineStore) > 0 { + onlinePath := "://" + status.ServiceHostnames.OnlineStore clientRepoConfig.OnlineStore = OnlineStoreConfig{ Type: OnlineRemoteConfigType, - Path: strings.ToLower(string(corev1.URISchemeHTTP)) + "://" + status.ServiceHostnames.OnlineStore, + Path: HttpScheme + onlinePath, + } + if appliedServices.OnlineStore != nil && + appliedServices.OnlineStore.Server != nil && appliedServices.OnlineStore.Server.TLS.IsTLS() { + 
clientRepoConfig.OnlineStore.Cert = GetTlsPath(OnlineFeastType) + appliedServices.OnlineStore.Server.TLS.SecretKeyNames.TlsCrt + clientRepoConfig.OnlineStore.Path = HttpsScheme + onlinePath } } if len(status.ServiceHostnames.Registry) > 0 { @@ -100,6 +263,155 @@ func (feast *FeastServices) getClientRepoConfig() RepoConfig { RegistryType: RegistryRemoteConfigType, Path: status.ServiceHostnames.Registry, } + if localRegistryTls(featureStore) { + clientRepoConfig.Registry.Cert = GetTlsPath(RegistryFeastType) + appliedServices.Registry.Local.Server.TLS.SecretKeyNames.TlsCrt + } else if remoteRegistryTls(featureStore) { + clientRepoConfig.Registry.Cert = GetTlsPath(RegistryFeastType) + appliedServices.Registry.Remote.TLS.CertName + } + } + + return clientRepoConfig, nil +} + +func getRepoConfig( + featureStore *feastdevv1alpha1.FeatureStore, + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + status := featureStore.Status + repoConfig := initRepoConfig(status.Applied.FeastProject) + if status.Applied.AuthzConfig != nil { + if status.Applied.AuthzConfig.KubernetesAuthz != nil { + repoConfig.AuthzConfig = AuthzConfig{ + Type: KubernetesAuthType, + } + } else if status.Applied.AuthzConfig.OidcAuthz != nil { + repoConfig.AuthzConfig = AuthzConfig{ + Type: OidcAuthType, + } + + propertiesMap, err := secretExtractionFunc("", status.Applied.AuthzConfig.OidcAuthz.SecretRef.Name, "") + if err != nil { + return repoConfig, err + } + + oidcClientProperties := map[string]interface{}{} + for _, oidcClientProperty := range OidcClientProperties { + if val, exists := propertiesMap[string(oidcClientProperty)]; exists { + oidcClientProperties[string(oidcClientProperty)] = val + } else { + return repoConfig, missingOidcSecretProperty(oidcClientProperty) + } + } + repoConfig.AuthzConfig.OidcParameters = oidcClientProperties + } + } + return repoConfig, nil +} + +func getActualPath(filePath string, 
pvcConfig *feastdevv1alpha1.PvcConfig) string { + if pvcConfig == nil { + return filePath + } + return path.Join(pvcConfig.MountPath, filePath) +} + +func (feast *FeastServices) extractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + secret, err := feast.getSecret(secretRef) + if err != nil { + return nil, err + } + parameters := map[string]interface{}{} + + if secretKeyName != "" { + val, exists := secret.Data[secretKeyName] + if !exists { + return nil, fmt.Errorf("secret key %s doesn't exist in secret %s", secretKeyName, secretRef) + } + + err = yaml.Unmarshal(val, ¶meters) + if err != nil { + return nil, fmt.Errorf("secret %s contains invalid value", secretKeyName) + } + + typeVal, typeExists := parameters["type"] + if typeExists && storeType != typeVal { + return nil, fmt.Errorf("secret key %s in secret %s contains tag named type with value %s", secretKeyName, secretRef, typeVal) + } + + typeVal, typeExists = parameters["registry_type"] + if typeExists && storeType != typeVal { + return nil, fmt.Errorf("secret key %s in secret %s contains tag named registry_type with value %s", secretKeyName, secretRef, typeVal) + } + } else { + for k, v := range secret.Data { + var val interface{} + err := yaml.Unmarshal(v, &val) + if err != nil { + return nil, fmt.Errorf("secret %s contains invalid value %v", k, v) + } + parameters[k] = val + } } - return clientRepoConfig + + return parameters, nil +} + +func mergeStructWithDBParametersMap(parametersMap *map[string]interface{}, s interface{}) error { + for key, val := range *parametersMap { + hasAttribute, err := hasAttrib(s, key, val) + if err != nil { + return err + } + + if hasAttribute { + delete(*parametersMap, key) + } + } + + return nil +} + +func (feast *FeastServices) GetDefaultRepoConfig() RepoConfig { + return defaultRepoConfig(feast.Handler.FeatureStore) +} + +func defaultRepoConfig(featureStore *feastdevv1alpha1.FeatureStore) RepoConfig { + repoConfig 
:= initRepoConfig(featureStore.Status.Applied.FeastProject) + repoConfig.OnlineStore = defaultOnlineStoreConfig(featureStore) + repoConfig.Registry = defaultRegistryConfig(featureStore) + return repoConfig +} + +func (feast *FeastServices) GetInitRepoConfig() RepoConfig { + return initRepoConfig(feast.Handler.FeatureStore.Status.Applied.FeastProject) +} + +func initRepoConfig(feastProject string) RepoConfig { + return RepoConfig{ + Project: feastProject, + Provider: LocalProviderType, + EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + AuthzConfig: defaultAuthzConfig, + } +} + +func defaultOnlineStoreConfig(featureStore *feastdevv1alpha1.FeatureStore) OnlineStoreConfig { + return OnlineStoreConfig{ + Type: OnlineSqliteConfigType, + Path: defaultOnlineStorePath(featureStore), + } +} + +func defaultRegistryConfig(featureStore *feastdevv1alpha1.FeatureStore) RegistryConfig { + return RegistryConfig{ + RegistryType: RegistryFileConfigType, + Path: defaultRegistryPath(featureStore), + } +} + +var defaultOfflineStoreConfig = OfflineStoreConfig{ + Type: OfflineFilePersistenceDaskConfigType, +} + +var defaultAuthzConfig = AuthzConfig{ + Type: NoAuthAuthType, } diff --git a/infra/feast-operator/internal/controller/services/repo_config_test.go b/infra/feast-operator/internal/controller/services/repo_config_test.go new file mode 100644 index 00000000000..a346ac72e80 --- /dev/null +++ b/infra/feast-operator/internal/controller/services/repo_config_test.go @@ -0,0 +1,424 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" +) + +var projectName = "test-project" + +var _ = Describe("Repo Config", func() { + Context("When creating the RepoConfig of a FeatureStore", func() { + It("should successfully create the repo configs", func() { + By("Having the minimal created resource") + featureStore := minimalFeatureStore() + ApplyDefaultsToStatus(featureStore) + + expectedRegistryConfig := RegistryConfig{ + RegistryType: "file", + Path: EphemeralPath + "/" + DefaultRegistryPath, + } + expectedOnlineConfig := OnlineStoreConfig{ + Type: "sqlite", + Path: EphemeralPath + "/" + DefaultOnlineStorePath, + } + + repoConfig, err := getServiceRepoConfig(featureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(NoAuthAuthType)) + Expect(repoConfig.OfflineStore).To(Equal(emptyOfflineStoreConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) + + By("Having the local registry resource") + featureStore = minimalFeatureStore() + testPath := "/test/file.db" + featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: testPath, + }, + }, + }, + }, + } + ApplyDefaultsToStatus(featureStore) + + expectedRegistryConfig = RegistryConfig{ + RegistryType: "file", + Path: testPath, + } + + repoConfig, err = getServiceRepoConfig(featureStore, 
emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(NoAuthAuthType)) + Expect(repoConfig.OfflineStore).To(Equal(emptyOfflineStoreConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) + + By("Adding an offlineStore with PVC") + featureStore.Spec.Services.OfflineStore = &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1alpha1.PvcConfig{ + MountPath: "/testing", + }, + }, + }, + } + ApplyDefaultsToStatus(featureStore) + appliedServices := featureStore.Status.Applied.Services + Expect(appliedServices.OnlineStore).NotTo(BeNil()) + Expect(appliedServices.Registry.Local).NotTo(BeNil()) + + repoConfig, err = getServiceRepoConfig(featureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.OfflineStore).To(Equal(defaultOfflineStoreConfig)) + Expect(repoConfig.AuthzConfig.Type).To(Equal(NoAuthAuthType)) + Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + + By("Having the remote registry resource") + featureStore = minimalFeatureStore() + featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Remote: &feastdevv1alpha1.RemoteRegistryConfig{ + FeastRef: &feastdevv1alpha1.FeatureStoreRef{ + Name: "registry", + }, + }, + }, + } + ApplyDefaultsToStatus(featureStore) + repoConfig, err = getServiceRepoConfig(featureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(NoAuthAuthType)) + Expect(repoConfig.OfflineStore).To(Equal(emptyOfflineStoreConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + 
Expect(repoConfig.Registry).To(Equal(emptyRegistryConfig)) + + By("Having the all the file services") + featureStore = minimalFeatureStore() + featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + Type: "duckdb", + }, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: "/data/online.db", + }, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: "/data/registry.db", + }, + }, + }, + }, + } + ApplyDefaultsToStatus(featureStore) + + expectedOfflineConfig := OfflineStoreConfig{ + Type: "duckdb", + } + expectedRegistryConfig = RegistryConfig{ + RegistryType: "file", + Path: "/data/registry.db", + } + expectedOnlineConfig = OnlineStoreConfig{ + Type: "sqlite", + Path: "/data/online.db", + } + + repoConfig, err = getServiceRepoConfig(featureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(NoAuthAuthType)) + Expect(repoConfig.OfflineStore).To(Equal(expectedOfflineConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) + + By("Having kubernetes authorization") + featureStore = minimalFeatureStore() + featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ + KubernetesAuthz: &feastdevv1alpha1.KubernetesAuthz{}, + } + featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{}, + OnlineStore: &feastdevv1alpha1.OnlineStore{}, + Registry: &feastdevv1alpha1.Registry{ + Local: 
&feastdevv1alpha1.LocalRegistryConfig{}, + }, + } + ApplyDefaultsToStatus(featureStore) + + expectedOfflineConfig = OfflineStoreConfig{ + Type: "dask", + } + + repoConfig, err = getServiceRepoConfig(featureStore, mockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(KubernetesAuthType)) + Expect(repoConfig.OfflineStore).To(Equal(expectedOfflineConfig)) + Expect(repoConfig.OnlineStore).To(Equal(defaultOnlineStoreConfig(featureStore))) + Expect(repoConfig.Registry).To(Equal(defaultRegistryConfig(featureStore))) + + By("Having oidc authorization") + featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ + OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + SecretRef: corev1.LocalObjectReference{ + Name: "oidc-secret", + }, + }, + } + ApplyDefaultsToStatus(featureStore) + + secretExtractionFunc := mockOidcConfigFromSecret(map[string]interface{}{ + string(OidcAuthDiscoveryUrl): "discovery-url", + string(OidcClientId): "client-id", + string(OidcClientSecret): "client-secret", + string(OidcUsername): "username", + string(OidcPassword): "password"}) + repoConfig, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(OidcAuthType)) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveLen(2)) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveKey(string(OidcClientId))) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveKey(string(OidcAuthDiscoveryUrl))) + Expect(repoConfig.OfflineStore).To(Equal(expectedOfflineConfig)) + Expect(repoConfig.OnlineStore).To(Equal(defaultOnlineStoreConfig(featureStore))) + Expect(repoConfig.Registry).To(Equal(defaultRegistryConfig(featureStore))) + + repoConfig, err = getClientRepoConfig(featureStore, secretExtractionFunc) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.AuthzConfig.Type).To(Equal(OidcAuthType)) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveLen(3)) + 
Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveKey(string(OidcClientSecret))) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveKey(string(OidcUsername))) + Expect(repoConfig.AuthzConfig.OidcParameters).To(HaveKey(string(OidcPassword))) + + By("Having the all the db services") + featureStore = minimalFeatureStore() + featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ + Type: string(OfflineDBPersistenceSnowflakeConfigType), + SecretRef: corev1.LocalObjectReference{ + Name: "offline-test-secret", + }, + }, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OnlineStoreDBStorePersistence{ + Type: string(OnlineDBPersistenceSnowflakeConfigType), + SecretRef: corev1.LocalObjectReference{ + Name: "online-test-secret", + }, + }, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + DBPersistence: &feastdevv1alpha1.RegistryDBStorePersistence{ + Type: string(RegistryDBPersistenceSnowflakeConfigType), + SecretRef: corev1.LocalObjectReference{ + Name: "registry-test-secret", + }, + }, + }, + }, + }, + } + parameterMap := createParameterMap() + ApplyDefaultsToStatus(featureStore) + featureStore.Spec.Services.OfflineStore.Persistence.FilePersistence = nil + featureStore.Spec.Services.OnlineStore.Persistence.FilePersistence = nil + featureStore.Spec.Services.Registry.Local.Persistence.FilePersistence = nil + repoConfig, err = getServiceRepoConfig(featureStore, mockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + newMap := CopyMap(parameterMap) + port := parameterMap["port"].(int) + delete(newMap, "port") + expectedOfflineConfig = OfflineStoreConfig{ + Type: 
OfflineDBPersistenceSnowflakeConfigType, + Port: port, + DBParameters: newMap, + } + expectedOnlineConfig = OnlineStoreConfig{ + Type: OnlineDBPersistenceSnowflakeConfigType, + DBParameters: CopyMap(parameterMap), + } + expectedRegistryConfig = RegistryConfig{ + RegistryType: RegistryDBPersistenceSnowflakeConfigType, + DBParameters: parameterMap, + } + Expect(repoConfig.OfflineStore).To(Equal(expectedOfflineConfig)) + Expect(repoConfig.OnlineStore).To(Equal(expectedOnlineConfig)) + Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) + }) + }) + It("should fail to create the repo configs", func() { + featureStore := minimalFeatureStore() + + By("Having invalid server oidc authorization") + featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ + OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + SecretRef: corev1.LocalObjectReference{ + Name: "oidc-secret", + }, + }, + } + ApplyDefaultsToStatus(featureStore) + + secretExtractionFunc := mockOidcConfigFromSecret(map[string]interface{}{ + string(OidcClientId): "client-id", + string(OidcClientSecret): "client-secret", + string(OidcUsername): "username", + string(OidcPassword): "password"}) + _, err := getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getClientRepoConfig(featureStore, secretExtractionFunc) + Expect(err).ToNot(HaveOccurred()) + + By("Having invalid client oidc authorization") + featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ + OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + SecretRef: corev1.LocalObjectReference{ + Name: "oidc-secret", + }, + }, 
+ } + ApplyDefaultsToStatus(featureStore) + + secretExtractionFunc = mockOidcConfigFromSecret(map[string]interface{}{ + string(OidcAuthDiscoveryUrl): "discovery-url", + string(OidcClientId): "client-id", + string(OidcUsername): "username", + string(OidcPassword): "password"}) + _, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getServiceRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + _, err = getClientRepoConfig(featureStore, secretExtractionFunc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("missing OIDC secret")) + }) +}) + +var emptyOfflineStoreConfig = OfflineStoreConfig{} +var emptyRegistryConfig = RegistryConfig{} + +func minimalFeatureStore() *feastdevv1alpha1.FeatureStore { + return &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: projectName, + }, + } +} + +func minimalFeatureStoreWithAllServers() *feastdevv1alpha1.FeatureStore { + feast := minimalFeatureStore() + // onlineStore configured by default + feast.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{}, + } + return feast +} + +func emptyMockExtractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + return map[string]interface{}{}, nil +} + +func 
mockExtractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + return createParameterMap(), nil +} + +func mockOidcConfigFromSecret( + oidcProperties map[string]interface{}) func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + return func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + return oidcProperties, nil + } +} + +func createParameterMap() map[string]interface{} { + yamlString := ` +hosts: + - 192.168.1.1 + - 192.168.1.2 + - 192.168.1.3 +keyspace: KeyspaceName +port: 9042 +username: user +password: secret +protocol_version: 5 +load_balancing: + local_dc: datacenter1 + load_balancing_policy: TokenAwarePolicy(DCAwareRoundRobinPolicy) +read_concurrency: 100 +write_concurrency: 100 +` + var parameters map[string]interface{} + + err := yaml.Unmarshal([]byte(yamlString), ¶meters) + if err != nil { + fmt.Println(err) + } + return parameters +} diff --git a/infra/feast-operator/internal/controller/services/services.go b/infra/feast-operator/internal/controller/services/services.go index 5e1778322f3..d6d943568d7 100644 --- a/infra/feast-operator/internal/controller/services/services.go +++ b/infra/feast-operator/internal/controller/services/services.go @@ -22,6 +22,9 @@ import ( "strings" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + routev1 "github.com/openshift/api/route/v1" + + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,77 +37,222 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Apply defaults and set service hostnames in FeatureStore status +func (feast *FeastServices) ApplyDefaults() error { + ApplyDefaultsToStatus(feast.Handler.FeatureStore) + if err := feast.setTlsDefaults(); err != nil { + return err + } + if err := 
feast.setServiceHostnames(); err != nil { + return err + } + return nil +} + // Deploy the feast services func (feast *FeastServices) Deploy() error { - if err := feast.setServiceHostnames(); err != nil { + if feast.noLocalCoreServerConfigured() { + return errors.New("at least one local server must be configured. e.g. registry / online / offline") + } + openshiftTls, err := feast.checkOpenshiftTls() + if err != nil { + return err + } + if openshiftTls { + if err := feast.createCaConfigMap(); err != nil { + return err + } + } else { + _ = feast.Handler.DeleteOwnedFeastObj(feast.initCaConfigMap()) + } + + services := feast.Handler.FeatureStore.Status.Applied.Services + if feast.isOfflineStore() { + err := feast.validateOfflineStorePersistence(services.OfflineStore.Persistence) + if err != nil { + return err + } + + if err = feast.deployFeastServiceByType(OfflineFeastType); err != nil { + return err + } + } else { + if err := feast.removeFeastServiceByType(OfflineFeastType); err != nil { + return err + } + } + + if feast.isOnlineStore() { + err := feast.validateOnlineStorePersistence(services.OnlineStore.Persistence) + if err != nil { + return err + } + + if err = feast.deployFeastServiceByType(OnlineFeastType); err != nil { + return err + } + } else { + if err := feast.removeFeastServiceByType(OnlineFeastType); err != nil { + return err + } + } + + if feast.isLocalRegistry() { + err := feast.validateRegistryPersistence(services.Registry.Local.Persistence) + if err != nil { + return err + } + + if err = feast.deployFeastServiceByType(RegistryFeastType); err != nil { + return err + } + } else { + if err := feast.removeFeastServiceByType(RegistryFeastType); err != nil { + return err + } + } + if feast.isUiServer() { + if err = feast.deployFeastServiceByType(UIFeastType); err != nil { + return err + } + if err = feast.createRoute(UIFeastType); err != nil { + return err + } + } else { + if err := feast.removeFeastServiceByType(UIFeastType); err != nil { + return err + } + 
if err := feast.removeRoute(UIFeastType); err != nil { + return err + } + } + + if err := feast.createServiceAccount(); err != nil { + return err + } + if err := feast.createDeployment(); err != nil { + return err + } + if err := feast.deployClient(); err != nil { return err } - services := feast.FeatureStore.Status.Applied.Services - if services != nil { - if services.OfflineStore != nil { - if err := feast.deployFeastServiceByType(OfflineFeastType); err != nil { + return nil +} + +func (feast *FeastServices) validateRegistryPersistence(registryPersistence *feastdevv1alpha1.RegistryPersistence) error { + if registryPersistence != nil { + dbPersistence := registryPersistence.DBPersistence + + if dbPersistence != nil && len(dbPersistence.Type) > 0 { + if err := checkRegistryDBStorePersistenceType(dbPersistence.Type); err != nil { return err } - } else { - if err := feast.removeFeastServiceByType(OfflineFeastType); err != nil { - return err + + if len(dbPersistence.SecretRef.Name) > 0 { + secretRef := dbPersistence.SecretRef.Name + if _, err := feast.getSecret(secretRef); err != nil { + return err + } } } + } - if services.OnlineStore != nil { - if err := feast.deployFeastServiceByType(OnlineFeastType); err != nil { + return nil +} + +func (feast *FeastServices) validateOnlineStorePersistence(onlinePersistence *feastdevv1alpha1.OnlineStorePersistence) error { + if onlinePersistence != nil { + dbPersistence := onlinePersistence.DBPersistence + + if dbPersistence != nil && len(dbPersistence.Type) > 0 { + if err := checkOnlineStoreDBStorePersistenceType(dbPersistence.Type); err != nil { return err } - } else { - if err := feast.removeFeastServiceByType(OnlineFeastType); err != nil { - return err + + if len(dbPersistence.SecretRef.Name) > 0 { + secretRef := dbPersistence.SecretRef.Name + if _, err := feast.getSecret(secretRef); err != nil { + return err + } } } + } - if feast.isLocalRegistry() { - if err := feast.deployFeastServiceByType(RegistryFeastType); err != nil { 
+ return nil +} + +func (feast *FeastServices) validateOfflineStorePersistence(offlinePersistence *feastdevv1alpha1.OfflineStorePersistence) error { + if offlinePersistence != nil { + filePersistence := offlinePersistence.FilePersistence + dbPersistence := offlinePersistence.DBPersistence + + if filePersistence != nil && len(filePersistence.Type) > 0 { + if err := checkOfflineStoreFilePersistenceType(filePersistence.Type); err != nil { return err } - } else { - if err := feast.removeFeastServiceByType(RegistryFeastType); err != nil { + } else if dbPersistence != nil && + len(dbPersistence.Type) > 0 { + if err := checkOfflineStoreDBStorePersistenceType(dbPersistence.Type); err != nil { return err } - } - } - if err := feast.deployClient(); err != nil { - return err + if len(dbPersistence.SecretRef.Name) > 0 { + secretRef := dbPersistence.SecretRef.Name + if _, err := feast.getSecret(secretRef); err != nil { + return err + } + } + } } return nil } func (feast *FeastServices) deployFeastServiceByType(feastType FeastServiceType) error { - if err := feast.createService(feastType); err != nil { - return feast.setFeastServiceCondition(err, feastType) + if pvcCreate, shouldCreate := shouldCreatePvc(feast.Handler.FeatureStore, feastType); shouldCreate { + if err := feast.createPVC(pvcCreate, feastType); err != nil { + return feast.setFeastServiceCondition(err, feastType) + } + } else { + _ = feast.Handler.DeleteOwnedFeastObj(feast.initPVC(feastType)) } - if err := feast.createDeployment(feastType); err != nil { - return feast.setFeastServiceCondition(err, feastType) + if serviceConfig := feast.getServerConfigs(feastType); serviceConfig != nil { + if err := feast.createService(feastType); err != nil { + return feast.setFeastServiceCondition(err, feastType) + } + } else { + _ = feast.Handler.DeleteOwnedFeastObj(feast.initFeastSvc(feastType)) } return feast.setFeastServiceCondition(nil, feastType) } func (feast *FeastServices) removeFeastServiceByType(feastType 
FeastServiceType) error { - if err := feast.deleteOwnedFeastObj(feast.initFeastDeploy(feastType)); err != nil { + if err := feast.Handler.DeleteOwnedFeastObj(feast.initFeastSvc(feastType)); err != nil { return err } - if err := feast.deleteOwnedFeastObj(feast.initFeastSvc(feastType)); err != nil { + if err := feast.Handler.DeleteOwnedFeastObj(feast.initPVC(feastType)); err != nil { + return err + } + apimeta.RemoveStatusCondition(&feast.Handler.FeatureStore.Status.Conditions, FeastServiceConditions[feastType][metav1.ConditionTrue].Type) + return nil +} + +func (feast *FeastServices) removeRoute(feastType FeastServiceType) error { + if !isOpenShift { + return nil + } + route := feast.initRoute(feastType) + if err := feast.Handler.DeleteOwnedFeastObj(route); err != nil { return err } - apimeta.RemoveStatusCondition(&feast.FeatureStore.Status.Conditions, FeastServiceConditions[feastType][metav1.ConditionTrue].Type) return nil } func (feast *FeastServices) createService(feastType FeastServiceType) error { - logger := log.FromContext(feast.Context) + logger := log.FromContext(feast.Handler.Context) svc := feast.initFeastSvc(feastType) - if op, err := controllerutil.CreateOrUpdate(feast.Context, feast.Client, svc, controllerutil.MutateFn(func() error { + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, svc, controllerutil.MutateFn(func() error { return feast.setService(svc, feastType) })); err != nil { return err @@ -114,11 +262,24 @@ func (feast *FeastServices) createService(feastType FeastServiceType) error { return nil } -func (feast *FeastServices) createDeployment(feastType FeastServiceType) error { - logger := log.FromContext(feast.Context) - deploy := feast.initFeastDeploy(feastType) - if op, err := controllerutil.CreateOrUpdate(feast.Context, feast.Client, deploy, controllerutil.MutateFn(func() error { - return feast.setDeployment(deploy, feastType) +func (feast *FeastServices) createServiceAccount() error { + logger := 
log.FromContext(feast.Handler.Context) + sa := feast.initFeastSA() + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, sa, controllerutil.MutateFn(func() error { + return feast.setServiceAccount(sa) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "ServiceAccount", sa.Name, "operation", op) + } + return nil +} + +func (feast *FeastServices) createDeployment() error { + logger := log.FromContext(feast.Handler.Context) + deploy := feast.initFeastDeploy() + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, deploy, controllerutil.MutateFn(func() error { + return feast.setDeployment(deploy) })); err != nil { return err } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { @@ -128,147 +289,426 @@ func (feast *FeastServices) createDeployment(feastType FeastServiceType) error { return nil } -func (feast *FeastServices) setDeployment(deploy *appsv1.Deployment, feastType FeastServiceType) error { - fsYamlB64, err := feast.GetServiceFeatureStoreYamlBase64(feastType) +func (feast *FeastServices) createRoute(feastType FeastServiceType) error { + logger := log.FromContext(feast.Handler.Context) + if !isOpenShift { + return nil + } + logger.Info("Reconciling route for Feast service", "ServiceType", feastType) + route := feast.initRoute(feastType) + if op, err := controllerutil.CreateOrUpdate(feast.Handler.Context, feast.Handler.Client, route, controllerutil.MutateFn(func() error { + return feast.setRoute(route, feastType) + })); err != nil { + return err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated { + logger.Info("Successfully reconciled", "Route", route.Name, "operation", op) + } + + return nil +} + +func (feast *FeastServices) createPVC(pvcCreate 
*feastdevv1alpha1.PvcCreate, feastType FeastServiceType) error { + logger := log.FromContext(feast.Handler.Context) + pvc, err := feast.createNewPVC(pvcCreate, feastType) if err != nil { return err } - deploy.Labels = feast.getLabels(feastType) - deploySettings := FeastServiceConstants[feastType] - serviceConfigs := feast.getServiceConfigs(feastType) - defaultServiceConfigs := serviceConfigs.DefaultConfigs - // standard configs are applied here - probeHandler := corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(int(deploySettings.TargetPort)), - }, + // PVCs are immutable, so we only create... we don't update an existing one. + err = feast.Handler.Client.Get(feast.Handler.Context, client.ObjectKeyFromObject(pvc), pvc) + if err != nil && apierrors.IsNotFound(err) { + err = feast.Handler.Client.Create(feast.Handler.Context, pvc) + if err != nil { + return err + } + logger.Info("Successfully created", "PersistentVolumeClaim", pvc.Name) } + + return nil +} + +func (feast *FeastServices) setDeployment(deploy *appsv1.Deployment) error { + deploy.Labels = feast.getLabels() deploy.Spec = appsv1.DeploymentSpec{ Replicas: &DefaultReplicas, Selector: metav1.SetAsLabelSelector(deploy.GetLabels()), + Strategy: feast.getDeploymentStrategy(), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: deploy.GetLabels(), }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: string(feastType), - Image: *defaultServiceConfigs.Image, - Command: deploySettings.Command, - Ports: []corev1.ContainerPort{ - { - Name: string(feastType), - ContainerPort: deploySettings.TargetPort, - Protocol: corev1.ProtocolTCP, - }, - }, - Env: []corev1.EnvVar{ - { - Name: FeatureStoreYamlEnvVar, - Value: fsYamlB64, - }, - }, - LivenessProbe: &corev1.Probe{ - ProbeHandler: probeHandler, - InitialDelaySeconds: 30, - PeriodSeconds: 30, - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: probeHandler, - InitialDelaySeconds: 20, - PeriodSeconds: 
10, - }, - }, + ServiceAccountName: feast.initFeastSA().Name, + }, + }, + } + if err := feast.setPod(&deploy.Spec.Template.Spec); err != nil { + return err + } + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, deploy, feast.Handler.Scheme) +} + +func (feast *FeastServices) setPod(podSpec *corev1.PodSpec) error { + if err := feast.setContainers(podSpec); err != nil { + return err + } + feast.mountTlsConfigs(podSpec) + feast.mountPvcConfigs(podSpec) + feast.mountEmptyDirVolumes(podSpec) + feast.mountUserDefinedVolumes(podSpec) + + return nil +} + +func (feast *FeastServices) setContainers(podSpec *corev1.PodSpec) error { + fsYamlB64, err := feast.GetServiceFeatureStoreYamlBase64() + if err != nil { + return err + } + + feast.setInitContainer(podSpec, fsYamlB64) + if feast.isRegistryServer() { + feast.setContainer(&podSpec.Containers, RegistryFeastType, fsYamlB64) + } + if feast.isOnlineServer() { + feast.setContainer(&podSpec.Containers, OnlineFeastType, fsYamlB64) + } + if feast.isOfflineServer() { + feast.setContainer(&podSpec.Containers, OfflineFeastType, fsYamlB64) + } + if feast.isUiServer() { + feast.setContainer(&podSpec.Containers, UIFeastType, fsYamlB64) + } + return nil +} + +func (feast *FeastServices) setContainer(containers *[]corev1.Container, feastType FeastServiceType, fsYamlB64 string) { + if serverConfigs := feast.getServerConfigs(feastType); serverConfigs != nil { + defaultCtrConfigs := serverConfigs.ContainerConfigs.DefaultCtrConfigs + tls := feast.getTlsConfigs(feastType) + probeHandler := getProbeHandler(feastType, tls) + container := &corev1.Container{ + Name: string(feastType), + Image: *defaultCtrConfigs.Image, + WorkingDir: feast.getFeatureRepoDir(), + Command: feast.getContainerCommand(feastType), + Ports: []corev1.ContainerPort{ + { + Name: string(feastType), + ContainerPort: getTargetPort(feastType, tls), + Protocol: corev1.ProtocolTCP, }, }, + Env: []corev1.EnvVar{ + { + Name: TmpFeatureStoreYamlEnvVar, + Value: 
fsYamlB64, + }, + }, + StartupProbe: &corev1.Probe{ + ProbeHandler: probeHandler, + PeriodSeconds: 3, + FailureThreshold: 40, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: probeHandler, + PeriodSeconds: 20, + FailureThreshold: 6, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: probeHandler, + PeriodSeconds: 10, + }, + } + applyOptionalCtrConfigs(container, serverConfigs.ContainerConfigs.OptionalCtrConfigs) + volumeMounts := feast.getVolumeMounts(feastType) + if len(volumeMounts) > 0 { + container.VolumeMounts = append(container.VolumeMounts, volumeMounts...) + } + *containers = append(*containers, *container) + } +} + +func (feast *FeastServices) mountUserDefinedVolumes(podSpec *corev1.PodSpec) { + var volumes []corev1.Volume + if feast.Handler.FeatureStore.Status.Applied.Services != nil { + volumes = feast.Handler.FeatureStore.Status.Applied.Services.Volumes + } + if len(volumes) > 0 { + podSpec.Volumes = append(podSpec.Volumes, volumes...) + } +} + +func (feast *FeastServices) getVolumeMounts(feastType FeastServiceType) (volumeMounts []corev1.VolumeMount) { + if serviceConfigs := feast.getServerConfigs(feastType); serviceConfigs != nil { + return serviceConfigs.VolumeMounts + } + return []corev1.VolumeMount{} // Default empty slice +} + +func (feast *FeastServices) setRoute(route *routev1.Route, feastType FeastServiceType) error { + + svcName := feast.GetFeastServiceName(feastType) + route.Labels = feast.getFeastTypeLabels(feastType) + + tls := feast.getTlsConfigs(feastType) + route.Spec = routev1.RouteSpec{ + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: svcName, }, + Port: &routev1.RoutePort{ + TargetPort: intstr.FromInt(int(getTargetPort(feastType, tls))), + }, + } + if tls.IsTLS() { + route.Spec.TLS = &routev1.TLSConfig{ + Termination: routev1.TLSTerminationPassthrough, + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + } } - // configs are applied here - container := 
&deploy.Spec.Template.Spec.Containers[0] - applyOptionalContainerConfigs(container, serviceConfigs.OptionalConfigs) + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, route, feast.Handler.Scheme) +} - return controllerutil.SetControllerReference(feast.FeatureStore, deploy, feast.Scheme) +func (feast *FeastServices) getContainerCommand(feastType FeastServiceType) []string { + baseCommand := "feast" + options := []string{} + logLevel := feast.getLogLevelForType(feastType) + if logLevel != nil { + options = append(options, "--log-level", strings.ToUpper(*logLevel)) + } + + deploySettings := FeastServiceConstants[feastType] + targetPort := deploySettings.TargetHttpPort + tls := feast.getTlsConfigs(feastType) + if tls.IsTLS() { + targetPort = deploySettings.TargetHttpsPort + feastTlsPath := GetTlsPath(feastType) + deploySettings.Args = append(deploySettings.Args, []string{"--key", feastTlsPath + tls.SecretKeyNames.TlsKey, + "--cert", feastTlsPath + tls.SecretKeyNames.TlsCrt}...) + } + deploySettings.Args = append(deploySettings.Args, []string{"-p", strconv.Itoa(int(targetPort))}...) + + // Combine base command, options, and arguments + feastCommand := append([]string{baseCommand}, options...) + feastCommand = append(feastCommand, deploySettings.Args...) 
+ + return feastCommand +} + +func (feast *FeastServices) getDeploymentStrategy() appsv1.DeploymentStrategy { + if feast.Handler.FeatureStore.Status.Applied.Services.DeploymentStrategy != nil { + return *feast.Handler.FeatureStore.Status.Applied.Services.DeploymentStrategy + } + return appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + } +} + +func (feast *FeastServices) setInitContainer(podSpec *corev1.PodSpec, fsYamlB64 string) { + applied := feast.Handler.FeatureStore.Status.Applied + if applied.FeastProjectDir != nil && !applied.Services.DisableInitContainers { + feastProjectDir := applied.FeastProjectDir + workingDir := getOfflineMountPath(feast.Handler.FeatureStore) + projectPath := workingDir + "/" + applied.FeastProject + container := corev1.Container{ + Name: "feast-init", + Image: getFeatureServerImage(), + Env: []corev1.EnvVar{ + { + Name: TmpFeatureStoreYamlEnvVar, + Value: fsYamlB64, + }, + }, + Command: []string{"bash", "-c"}, + WorkingDir: workingDir, + } + + var createCommand string + if feastProjectDir.Init != nil { + initSlice := []string{"feast", "init"} + if feastProjectDir.Init.Minimal { + initSlice = append(initSlice, "-m") + } + if len(feastProjectDir.Init.Template) > 0 { + initSlice = append(initSlice, "-t", feastProjectDir.Init.Template) + } + initSlice = append(initSlice, applied.FeastProject) + createCommand = strings.Join(initSlice, " ") + } else if feastProjectDir.Git != nil { + gitSlice := []string{"git"} + for key, value := range feastProjectDir.Git.Configs { + gitSlice = append(gitSlice, "-c", key+"="+value) + } + gitSlice = append(gitSlice, "clone", feastProjectDir.Git.URL, projectPath) + + if len(feastProjectDir.Git.Ref) > 0 { + gitSlice = append(gitSlice, "&&", "cd "+projectPath, "&&", "git checkout "+feastProjectDir.Git.Ref) + } + createCommand = strings.Join(gitSlice, " ") + + if feastProjectDir.Git.Env != nil { + container.Env = envOverride(container.Env, *feastProjectDir.Git.Env) + } + if 
feastProjectDir.Git.EnvFrom != nil { + container.EnvFrom = *feastProjectDir.Git.EnvFrom + } + } + + featureRepoDir := feast.getFeatureRepoDir() + container.Args = []string{ + "echo \"Creating feast repository...\"\necho '" + createCommand + "'\n" + + "if [[ ! -d " + featureRepoDir + " ]]; then " + createCommand + "; fi;\n" + + "echo $" + TmpFeatureStoreYamlEnvVar + " | base64 -d \u003e " + featureRepoDir + "/feature_store.yaml;\necho \"Feast repo creation complete\";\n", + } + podSpec.InitContainers = append(podSpec.InitContainers, container) + } } func (feast *FeastServices) setService(svc *corev1.Service, feastType FeastServiceType) error { - svc.Labels = feast.getLabels(feastType) - deploySettings := FeastServiceConstants[feastType] + svc.Labels = feast.getFeastTypeLabels(feastType) + if feast.isOpenShiftTls(feastType) { + svc.Annotations = map[string]string{ + "service.beta.openshift.io/serving-cert-secret-name": svc.Name + tlsNameSuffix, + } + } + var port int32 = HttpPort + scheme := HttpScheme + tls := feast.getTlsConfigs(feastType) + if tls.IsTLS() { + port = HttpsPort + scheme = HttpsScheme + } svc.Spec = corev1.ServiceSpec{ - Selector: svc.GetLabels(), + Selector: feast.getLabels(), Type: corev1.ServiceTypeClusterIP, Ports: []corev1.ServicePort{ { - Name: strings.ToLower(string(corev1.URISchemeHTTP)), - Port: HttpPort, + Name: scheme, + Port: port, Protocol: corev1.ProtocolTCP, - TargetPort: intstr.FromInt(int(deploySettings.TargetPort)), + TargetPort: intstr.FromInt(int(getTargetPort(feastType, tls))), }, }, } - return controllerutil.SetControllerReference(feast.FeatureStore, svc, feast.Scheme) + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, svc, feast.Handler.Scheme) } -func (feast *FeastServices) getServiceConfigs(feastType FeastServiceType) feastdevv1alpha1.ServiceConfigs { - appliedSpec := feast.FeatureStore.Status.Applied - if feastType == OfflineFeastType && appliedSpec.Services.OfflineStore != nil { - return 
appliedSpec.Services.OfflineStore.ServiceConfigs +func (feast *FeastServices) setServiceAccount(sa *corev1.ServiceAccount) error { + sa.Labels = feast.getLabels() + return controllerutil.SetControllerReference(feast.Handler.FeatureStore, sa, feast.Handler.Scheme) +} + +func (feast *FeastServices) createNewPVC(pvcCreate *feastdevv1alpha1.PvcCreate, feastType FeastServiceType) (*corev1.PersistentVolumeClaim, error) { + pvc := feast.initPVC(feastType) + + pvc.Spec = corev1.PersistentVolumeClaimSpec{ + AccessModes: pvcCreate.AccessModes, + Resources: pvcCreate.Resources, } - if feastType == OnlineFeastType && appliedSpec.Services.OnlineStore != nil { - return appliedSpec.Services.OnlineStore.ServiceConfigs + if pvcCreate.StorageClassName != nil { + pvc.Spec.StorageClassName = pvcCreate.StorageClassName } - if feastType == RegistryFeastType && appliedSpec.Services.Registry != nil { - if appliedSpec.Services.Registry.Local != nil { - return appliedSpec.Services.Registry.Local.ServiceConfigs + return pvc, controllerutil.SetControllerReference(feast.Handler.FeatureStore, pvc, feast.Handler.Scheme) +} + +func (feast *FeastServices) getServerConfigs(feastType FeastServiceType) *feastdevv1alpha1.ServerConfigs { + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + switch feastType { + case OfflineFeastType: + if feast.isOfflineStore() { + return appliedServices.OfflineStore.Server + } + case OnlineFeastType: + if feast.isOnlineStore() { + return appliedServices.OnlineStore.Server + } + case RegistryFeastType: + if feast.isLocalRegistry() { + return appliedServices.Registry.Local.Server } + case UIFeastType: + return appliedServices.UI } - return feastdevv1alpha1.ServiceConfigs{} + return nil } -// GetObjectMeta returns the feast k8s object metadata -func (feast *FeastServices) GetObjectMeta(feastType FeastServiceType) metav1.ObjectMeta { - return metav1.ObjectMeta{Name: feast.GetFeastServiceName(feastType), Namespace: feast.FeatureStore.Namespace} +func 
(feast *FeastServices) getLogLevelForType(feastType FeastServiceType) *string { + if serviceConfigs := feast.getServerConfigs(feastType); serviceConfigs != nil { + return serviceConfigs.LogLevel + } + return nil +} + +// GetObjectMeta returns the feast k8s object metadata with type +func (feast *FeastServices) GetObjectMeta() metav1.ObjectMeta { + return metav1.ObjectMeta{Name: GetFeastName(feast.Handler.FeatureStore), Namespace: feast.Handler.FeatureStore.Namespace} +} + +// GetObjectMeta returns the feast k8s object metadata with type +func (feast *FeastServices) GetObjectMetaType(feastType FeastServiceType) metav1.ObjectMeta { + return metav1.ObjectMeta{Name: feast.GetFeastServiceName(feastType), Namespace: feast.Handler.FeatureStore.Namespace} } -// GetFeastServiceName returns the feast service object name based on service type func (feast *FeastServices) GetFeastServiceName(feastType FeastServiceType) string { - return feast.getFeastName() + "-" + string(feastType) + return GetFeastServiceName(feast.Handler.FeatureStore, feastType) } -func (feast *FeastServices) getFeastName() string { - return FeastPrefix + feast.FeatureStore.Name +func (feast *FeastServices) GetDeployment() (appsv1.Deployment, error) { + deployment := appsv1.Deployment{} + obj := feast.GetObjectMeta() + err := feast.Handler.Get(feast.Handler.Context, client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}, &deployment) + return deployment, err } -func (feast *FeastServices) getLabels(feastType FeastServiceType) map[string]string { +// GetFeastServiceName returns the feast service object name based on service type +func GetFeastServiceName(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) string { + return GetFeastName(featureStore) + "-" + string(feastType) +} + +func GetFeastName(featureStore *feastdevv1alpha1.FeatureStore) string { + return handler.FeastPrefix + featureStore.Name +} + +func (feast *FeastServices) getFeastTypeLabels(feastType 
FeastServiceType) map[string]string { + labels := feast.getLabels() + labels[ServiceTypeLabelKey] = string(feastType) + return labels +} + +func (feast *FeastServices) getLabels() map[string]string { return map[string]string{ - NameLabelKey: feast.FeatureStore.Name, - ServiceTypeLabelKey: string(feastType), + NameLabelKey: feast.Handler.FeatureStore.Name, } } func (feast *FeastServices) setServiceHostnames() error { - feast.FeatureStore.Status.ServiceHostnames = feastdevv1alpha1.ServiceHostnames{} - services := feast.FeatureStore.Status.Applied.Services - if services != nil { - domain := svcDomain + ":" + strconv.Itoa(HttpPort) - if services.OfflineStore != nil { - objMeta := feast.GetObjectMeta(OfflineFeastType) - feast.FeatureStore.Status.ServiceHostnames.OfflineStore = objMeta.Name + "." + objMeta.Namespace + domain - } - if services.OnlineStore != nil { - objMeta := feast.GetObjectMeta(OnlineFeastType) - feast.FeatureStore.Status.ServiceHostnames.OnlineStore = objMeta.Name + "." + objMeta.Namespace + domain - } - if feast.isLocalRegistry() { - objMeta := feast.GetObjectMeta(RegistryFeastType) - feast.FeatureStore.Status.ServiceHostnames.Registry = objMeta.Name + "." + objMeta.Namespace + domain - } else if feast.isRemoteRegistry() { - return feast.setRemoteRegistryURL() - } + feast.Handler.FeatureStore.Status.ServiceHostnames = feastdevv1alpha1.ServiceHostnames{} + domain := svcDomain + ":" + if feast.isOfflineServer() { + objMeta := feast.initFeastSvc(OfflineFeastType) + feast.Handler.FeatureStore.Status.ServiceHostnames.OfflineStore = objMeta.Name + "." + objMeta.Namespace + domain + + getPortStr(feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.Server.TLS) + } + if feast.isOnlineServer() { + objMeta := feast.initFeastSvc(OnlineFeastType) + feast.Handler.FeatureStore.Status.ServiceHostnames.OnlineStore = objMeta.Name + "." 
+ objMeta.Namespace + domain + + getPortStr(feast.Handler.FeatureStore.Status.Applied.Services.OnlineStore.Server.TLS) + } + if feast.isRegistryServer() { + objMeta := feast.initFeastSvc(RegistryFeastType) + feast.Handler.FeatureStore.Status.ServiceHostnames.Registry = objMeta.Name + "." + objMeta.Namespace + domain + + getPortStr(feast.Handler.FeatureStore.Status.Applied.Services.Registry.Local.Server.TLS) + } else if feast.isRemoteRegistry() { + return feast.setRemoteRegistryURL() + } + if feast.isUiServer() { + objMeta := feast.initFeastSvc(UIFeastType) + feast.Handler.FeatureStore.Status.ServiceHostnames.UI = objMeta.Name + "." + objMeta.Namespace + domain + + getPortStr(feast.Handler.FeatureStore.Status.Applied.Services.UI.TLS) } return nil } @@ -276,87 +716,123 @@ func (feast *FeastServices) setServiceHostnames() error { func (feast *FeastServices) setFeastServiceCondition(err error, feastType FeastServiceType) error { conditionMap := FeastServiceConditions[feastType] if err != nil { - logger := log.FromContext(feast.Context) + logger := log.FromContext(feast.Handler.Context) cond := conditionMap[metav1.ConditionFalse] cond.Message = "Error: " + err.Error() - apimeta.SetStatusCondition(&feast.FeatureStore.Status.Conditions, cond) + apimeta.SetStatusCondition(&feast.Handler.FeatureStore.Status.Conditions, cond) logger.Error(err, "Error deploying the FeatureStore "+string(ClientFeastType)+" service") return err } else { - apimeta.SetStatusCondition(&feast.FeatureStore.Status.Conditions, conditionMap[metav1.ConditionTrue]) + apimeta.SetStatusCondition(&feast.Handler.FeatureStore.Status.Conditions, conditionMap[metav1.ConditionTrue]) } return nil } func (feast *FeastServices) setRemoteRegistryURL() error { if feast.isRemoteHostnameRegistry() { - feast.FeatureStore.Status.ServiceHostnames.Registry = *feast.FeatureStore.Status.Applied.Services.Registry.Remote.Hostname + feast.Handler.FeatureStore.Status.ServiceHostnames.Registry = 
*feast.Handler.FeatureStore.Status.Applied.Services.Registry.Remote.Hostname } else if feast.IsRemoteRefRegistry() { - feastRemoteRef := feast.FeatureStore.Status.Applied.Services.Registry.Remote.FeastRef - // default to FeatureStore namespace if not set - if len(feastRemoteRef.Namespace) == 0 { - feastRemoteRef.Namespace = feast.FeatureStore.Namespace + remoteFeast, err := feast.getRemoteRegistryFeastHandler() + if err != nil { + return err + } + // referenced/remote registry must use the local registry server option and be in a 'Ready' state. + if remoteFeast != nil && + remoteFeast.isRegistryServer() && + apimeta.IsStatusConditionTrue(remoteFeast.Handler.FeatureStore.Status.Conditions, feastdevv1alpha1.RegistryReadyType) && + len(remoteFeast.Handler.FeatureStore.Status.ServiceHostnames.Registry) > 0 { + feast.Handler.FeatureStore.Status.ServiceHostnames.Registry = remoteFeast.Handler.FeatureStore.Status.ServiceHostnames.Registry + } else { + return errors.New("Remote feast registry of referenced FeatureStore '" + remoteFeast.Handler.FeatureStore.Name + "' is not ready") } + } + return nil +} +func (feast *FeastServices) getRemoteRegistryFeastHandler() (*FeastServices, error) { + if feast.IsRemoteRefRegistry() { + feastRemoteRef := feast.Handler.FeatureStore.Status.Applied.Services.Registry.Remote.FeastRef nsName := types.NamespacedName{Name: feastRemoteRef.Name, Namespace: feastRemoteRef.Namespace} - crNsName := client.ObjectKeyFromObject(feast.FeatureStore) + crNsName := client.ObjectKeyFromObject(feast.Handler.FeatureStore) if nsName == crNsName { - return errors.New("FeatureStore '" + crNsName.Name + "' can't reference itself in `spec.services.registry.remote.feastRef`") + return nil, errors.New("FeatureStore '" + crNsName.Name + "' can't reference itself in `spec.services.registry.remote.feastRef`") } - remoteFeastObj := &feastdevv1alpha1.FeatureStore{} - if err := feast.Client.Get(feast.Context, nsName, remoteFeastObj); err != nil { + if err := 
feast.Handler.Client.Get(feast.Handler.Context, nsName, remoteFeastObj); err != nil { if apierrors.IsNotFound(err) { - return errors.New("Referenced FeatureStore '" + feastRemoteRef.Name + "' was not found") + return nil, errors.New("Referenced FeatureStore '" + feastRemoteRef.Name + "' was not found") } - return err - } - - remoteFeast := FeastServices{ - Client: feast.Client, - Context: feast.Context, - FeatureStore: remoteFeastObj, - Scheme: feast.Scheme, + return nil, err } - // referenced/remote registry must use the local install option and be in a 'Ready' state. - if remoteFeast.isLocalRegistry() && apimeta.IsStatusConditionTrue(remoteFeastObj.Status.Conditions, feastdevv1alpha1.RegistryReadyType) { - feast.FeatureStore.Status.ServiceHostnames.Registry = remoteFeastObj.Status.ServiceHostnames.Registry - } else { - return errors.New("Remote feast registry of referenced FeatureStore '" + feastRemoteRef.Name + "' is not ready") + if feast.Handler.FeatureStore.Status.Applied.FeastProject != remoteFeastObj.Status.Applied.FeastProject { + return nil, errors.New("FeatureStore '" + remoteFeastObj.Name + "' is using a different feast project than '" + feast.Handler.FeatureStore.Status.Applied.FeastProject + "'. 
Project names must match.") } + return &FeastServices{ + Handler: handler.FeastHandler{ + Client: feast.Handler.Client, + Context: feast.Handler.Context, + FeatureStore: remoteFeastObj, + Scheme: feast.Handler.Scheme, + }, + }, nil } - return nil + return nil, nil } func (feast *FeastServices) isLocalRegistry() bool { - appliedServices := feast.FeatureStore.Status.Applied.Services - return appliedServices != nil && appliedServices.Registry != nil && appliedServices.Registry.Local != nil + return IsLocalRegistry(feast.Handler.FeatureStore) +} + +func (feast *FeastServices) isRegistryServer() bool { + return IsRegistryServer(feast.Handler.FeatureStore) } func (feast *FeastServices) isRemoteRegistry() bool { - appliedServices := feast.FeatureStore.Status.Applied.Services - return appliedServices != nil && appliedServices.Registry != nil && appliedServices.Registry.Remote != nil + return isRemoteRegistry(feast.Handler.FeatureStore) } func (feast *FeastServices) IsRemoteRefRegistry() bool { - if feast.isRemoteRegistry() { - remote := feast.FeatureStore.Status.Applied.Services.Registry.Remote - return remote != nil && remote.FeastRef != nil - } - return false + return feast.isRemoteRegistry() && + feast.Handler.FeatureStore.Status.Applied.Services.Registry.Remote.FeastRef != nil } func (feast *FeastServices) isRemoteHostnameRegistry() bool { - if feast.isRemoteRegistry() { - remote := feast.FeatureStore.Status.Applied.Services.Registry.Remote - return remote != nil && remote.Hostname != nil - } - return false + return feast.isRemoteRegistry() && + feast.Handler.FeatureStore.Status.Applied.Services.Registry.Remote.Hostname != nil } -func (feast *FeastServices) initFeastDeploy(feastType FeastServiceType) *appsv1.Deployment { +func (feast *FeastServices) isOfflineServer() bool { + return feast.isOfflineStore() && + feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.Server != nil +} + +func (feast *FeastServices) isOfflineStore() bool { + appliedServices := 
feast.Handler.FeatureStore.Status.Applied.Services + return appliedServices != nil && appliedServices.OfflineStore != nil +} + +func (feast *FeastServices) isOnlineServer() bool { + return feast.isOnlineStore() && + feast.Handler.FeatureStore.Status.Applied.Services.OnlineStore.Server != nil +} + +func (feast *FeastServices) isOnlineStore() bool { + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + return appliedServices != nil && appliedServices.OnlineStore != nil +} + +func (feast *FeastServices) noLocalCoreServerConfigured() bool { + return !(feast.isRegistryServer() || feast.isOnlineServer() || feast.isOfflineServer()) +} + +func (feast *FeastServices) isUiServer() bool { + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + return appliedServices != nil && appliedServices.UI != nil +} + +func (feast *FeastServices) initFeastDeploy() *appsv1.Deployment { deploy := &appsv1.Deployment{ - ObjectMeta: feast.GetObjectMeta(feastType), + ObjectMeta: feast.GetObjectMeta(), } deploy.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment")) return deploy @@ -364,33 +840,42 @@ func (feast *FeastServices) initFeastDeploy(feastType FeastServiceType) *appsv1. 
func (feast *FeastServices) initFeastSvc(feastType FeastServiceType) *corev1.Service { svc := &corev1.Service{ - ObjectMeta: feast.GetObjectMeta(feastType), + ObjectMeta: feast.GetObjectMetaType(feastType), } svc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) return svc } -// delete an object if the FeatureStore is set as the object's controller/owner -func (feast *FeastServices) deleteOwnedFeastObj(obj client.Object) error { - if err := feast.Client.Get(feast.Context, client.ObjectKeyFromObject(obj), obj); err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return err +func (feast *FeastServices) initFeastSA() *corev1.ServiceAccount { + sa := &corev1.ServiceAccount{ + ObjectMeta: feast.GetObjectMeta(), } - for _, ref := range obj.GetOwnerReferences() { - if *ref.Controller && ref.UID == feast.FeatureStore.UID { - if err := feast.Client.Delete(feast.Context, obj); err != nil { - return err - } - } + sa.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ServiceAccount")) + return sa +} + +func (feast *FeastServices) initPVC(feastType FeastServiceType) *corev1.PersistentVolumeClaim { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: feast.GetObjectMetaType(feastType), } - return nil + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + return pvc } -func applyOptionalContainerConfigs(container *corev1.Container, optionalConfigs feastdevv1alpha1.OptionalConfigs) { +func (feast *FeastServices) initRoute(feastType FeastServiceType) *routev1.Route { + route := &routev1.Route{ + ObjectMeta: feast.GetObjectMetaType(feastType), + } + route.SetGroupVersionKind(routev1.SchemeGroupVersion.WithKind("Route")) + return route +} + +func applyOptionalCtrConfigs(container *corev1.Container, optionalConfigs feastdevv1alpha1.OptionalCtrConfigs) { if optionalConfigs.Env != nil { - container.Env = mergeEnvVarsArrays(container.Env, optionalConfigs.Env) + container.Env = envOverride(container.Env, 
*optionalConfigs.Env) + } + if optionalConfigs.EnvFrom != nil { + container.EnvFrom = *optionalConfigs.EnvFrom } if optionalConfigs.ImagePullPolicy != nil { container.ImagePullPolicy = *optionalConfigs.ImagePullPolicy @@ -400,24 +885,119 @@ func applyOptionalContainerConfigs(container *corev1.Container, optionalConfigs } } -func mergeEnvVarsArrays(envVars1 []corev1.EnvVar, envVars2 *[]corev1.EnvVar) []corev1.EnvVar { - merged := make(map[string]corev1.EnvVar) +func (feast *FeastServices) mountPvcConfigs(podSpec *corev1.PodSpec) { + for _, feastType := range feastServerTypes { + if pvcConfig, hasPvcConfig := hasPvcConfig(feast.Handler.FeatureStore, feastType); hasPvcConfig { + feast.mountPvcConfig(podSpec, pvcConfig, feastType) + } + } +} + +func (feast *FeastServices) mountPvcConfig(podSpec *corev1.PodSpec, pvcConfig *feastdevv1alpha1.PvcConfig, feastType FeastServiceType) { + if podSpec != nil && pvcConfig != nil { + volName := feast.initPVC(feastType).Name + pvcName := volName + if pvcConfig.Ref != nil { + pvcName = pvcConfig.Ref.Name + } + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }) + if feastType == OfflineFeastType { + for i := range podSpec.InitContainers { + podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: pvcConfig.MountPath, + }) + } + } + for i := range podSpec.Containers { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: pvcConfig.MountPath, + }) + } + } +} + +func (feast *FeastServices) mountEmptyDirVolumes(podSpec *corev1.PodSpec) { + if shouldMountEmptyDir(feast.Handler.FeatureStore) { + mountEmptyDirVolume(podSpec) + } +} + +func (feast *FeastServices) getFeatureRepoDir() string { + applied := 
feast.Handler.FeatureStore.Status.Applied + feastProjectDir := getOfflineMountPath(feast.Handler.FeatureStore) + "/" + applied.FeastProject + if applied.FeastProjectDir != nil && applied.FeastProjectDir.Git != nil && len(applied.FeastProjectDir.Git.FeatureRepoPath) > 0 { + return feastProjectDir + "/" + applied.FeastProjectDir.Git.FeatureRepoPath + } + return feastProjectDir + "/" + FeatureRepoDir +} - // Add all env vars from the first array - for _, envVar := range envVars1 { - merged[envVar.Name] = envVar +func mountEmptyDirVolume(podSpec *corev1.PodSpec) { + if podSpec != nil { + volName := strings.TrimPrefix(EphemeralPath, "/") + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + for i := range podSpec.InitContainers { + podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: EphemeralPath, + }) + } + for i := range podSpec.Containers { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: EphemeralPath, + }) + } } +} - // Add all env vars from the second array, overriding duplicates - for _, envVar := range *envVars2 { - merged[envVar.Name] = envVar +func getTargetPort(feastType FeastServiceType, tls *feastdevv1alpha1.TlsConfigs) int32 { + if tls.IsTLS() { + return FeastServiceConstants[feastType].TargetHttpsPort } + return FeastServiceConstants[feastType].TargetHttpPort +} - // Convert the map back to an array - result := make([]corev1.EnvVar, 0, len(merged)) - for _, envVar := range merged { - result = append(result, envVar) +func getProbeHandler(feastType FeastServiceType, tls *feastdevv1alpha1.TlsConfigs) corev1.ProbeHandler { + targetPort := getTargetPort(feastType, tls) + if feastType == OnlineFeastType { + probeHandler := corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + 
Path: "/health", + Port: intstr.FromInt(int(targetPort)), + }, + } + if tls.IsTLS() { + probeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + return probeHandler } + return corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(int(targetPort)), + }, + } +} - return result +func IsDeploymentAvailable(conditions []appsv1.DeploymentCondition) bool { + for _, condition := range conditions { + if condition.Type == appsv1.DeploymentAvailable { + return condition.Status == corev1.ConditionTrue + } + } + + return false } diff --git a/infra/feast-operator/internal/controller/services/services_types.go b/infra/feast-operator/internal/controller/services/services_types.go index c2348666179..ac75caad19a 100644 --- a/infra/feast-operator/internal/controller/services/services_types.go +++ b/infra/feast-operator/internal/controller/services/services_types.go @@ -17,59 +17,98 @@ limitations under the License. package services import ( - "context" - "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + handler "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - FeastPrefix = "feast-" - FeatureStoreYamlEnvVar = "FEATURE_STORE_YAML_BASE64" - FeatureStoreYamlCmKey = "feature_store.yaml" - LocalRegistryPath = "/tmp/registry.db" - LocalOnlinePath = "/tmp/online_store.db" - svcDomain = ".svc.cluster.local" - HttpPort = 80 + TmpFeatureStoreYamlEnvVar = "TMP_FEATURE_STORE_YAML_BASE64" + feastServerImageVar = "RELATED_IMAGE_FEATURE_SERVER" + FeatureStoreYamlCmKey = "feature_store.yaml" + EphemeralPath = "/feast-data" + FeatureRepoDir = "feature_repo" + DefaultRegistryPath = "registry.db" + DefaultOnlineStorePath = "online_store.db" + svcDomain = ".svc.cluster.local" + + 
HttpPort = 80 + HttpsPort = 443 + HttpScheme = "http" + HttpsScheme = "https" + tlsPath = "/tls/" + tlsNameSuffix = "-tls" + + DefaultOfflineStorageRequest = "20Gi" + DefaultOnlineStorageRequest = "5Gi" + DefaultRegistryStorageRequest = "5Gi" OfflineFeastType FeastServiceType = "offline" OnlineFeastType FeastServiceType = "online" RegistryFeastType FeastServiceType = "registry" + UIFeastType FeastServiceType = "ui" ClientFeastType FeastServiceType = "client" + ClientCaFeastType FeastServiceType = "client-ca" - OfflineRemoteConfigType OfflineConfigType = "remote" - OfflineDaskConfigType OfflineConfigType = "dask" + OfflineRemoteConfigType OfflineConfigType = "remote" + OfflineFilePersistenceDaskConfigType OfflineConfigType = "dask" + OfflineFilePersistenceDuckDbConfigType OfflineConfigType = "duckdb" + OfflineDBPersistenceSnowflakeConfigType OfflineConfigType = "snowflake.offline" - OnlineRemoteConfigType OnlineConfigType = "remote" - OnlineSqliteConfigType OnlineConfigType = "sqlite" + OnlineRemoteConfigType OnlineConfigType = "remote" + OnlineSqliteConfigType OnlineConfigType = "sqlite" + OnlineDBPersistenceSnowflakeConfigType OnlineConfigType = "snowflake.online" + OnlineDBPersistenceCassandraConfigType OnlineConfigType = "cassandra" - RegistryRemoteConfigType RegistryConfigType = "remote" - RegistryFileConfigType RegistryConfigType = "file" + RegistryRemoteConfigType RegistryConfigType = "remote" + RegistryFileConfigType RegistryConfigType = "file" + RegistryDBPersistenceSnowflakeConfigType RegistryConfigType = "snowflake.registry" + RegistryDBPersistenceSQLConfigType RegistryConfigType = "sql" LocalProviderType FeastProviderType = "local" + + NoAuthAuthType AuthzType = "no_auth" + KubernetesAuthType AuthzType = "kubernetes" + OidcAuthType AuthzType = "oidc" + + OidcClientId OidcPropertyType = "client_id" + OidcAuthDiscoveryUrl OidcPropertyType = "auth_discovery_url" + OidcClientSecret OidcPropertyType = "client_secret" + OidcUsername OidcPropertyType = 
"username" + OidcPassword OidcPropertyType = "password" + + OidcMissingSecretError string = "missing OIDC secret: %s" ) var ( - DefaultImage = "feastdev/feature-server:" + feastversion.FeastVersion - DefaultReplicas = int32(1) - NameLabelKey = feastdevv1alpha1.GroupVersion.Group + "/name" - ServiceTypeLabelKey = feastdevv1alpha1.GroupVersion.Group + "/service-type" + DefaultImage = "feastdev/feature-server:" + feastversion.FeastVersion + DefaultReplicas = int32(1) + DefaultPVCAccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} + NameLabelKey = feastdevv1alpha1.GroupVersion.Group + "/name" + ServiceTypeLabelKey = feastdevv1alpha1.GroupVersion.Group + "/service-type" FeastServiceConstants = map[FeastServiceType]deploymentSettings{ OfflineFeastType: { - Command: []string{"feast", "serve_offline", "-h", "0.0.0.0"}, - TargetPort: 8815, + Args: []string{"serve_offline", "-h", "0.0.0.0"}, + TargetHttpPort: 8815, + TargetHttpsPort: 8816, }, OnlineFeastType: { - Command: []string{"feast", "serve", "-h", "0.0.0.0"}, - TargetPort: 6566, + Args: []string{"serve", "-h", "0.0.0.0"}, + TargetHttpPort: 6566, + TargetHttpsPort: 6567, }, RegistryFeastType: { - Command: []string{"feast", "serve_registry"}, - TargetPort: 6570, + Args: []string{"serve_registry"}, + TargetHttpPort: 6570, + TargetHttpsPort: 6571, + }, + UIFeastType: { + Args: []string{"ui", "-h", "0.0.0.0"}, + TargetHttpPort: 8888, + TargetHttpsPort: 8443, }, } @@ -113,6 +152,20 @@ var ( Reason: feastdevv1alpha1.RegistryFailedReason, }, }, + UIFeastType: { + metav1.ConditionTrue: { + Type: feastdevv1alpha1.UIReadyType, + Status: metav1.ConditionTrue, + Reason: feastdevv1alpha1.ReadyReason, + Message: feastdevv1alpha1.UIReadyMessage, + }, + metav1.ConditionFalse: { + Type: feastdevv1alpha1.UIReadyType, + Status: metav1.ConditionFalse, + Reason: feastdevv1alpha1.UIFailedReason, + }, + }, + ClientFeastType: { metav1.ConditionTrue: { Type: feastdevv1alpha1.ClientReadyType, @@ -127,8 +180,24 @@ var ( }, }, 
} + + OidcServerProperties = []OidcPropertyType{OidcClientId, OidcAuthDiscoveryUrl} + OidcClientProperties = []OidcPropertyType{OidcClientSecret, OidcUsername, OidcPassword} ) +// Feast server types: Reserved only for server types like Online, Offline, and Registry servers. Should not be used for client types like the UI, etc. +var feastServerTypes = []FeastServiceType{ + RegistryFeastType, + OfflineFeastType, + OnlineFeastType, +} + +// AuthzType defines the authorization type +type AuthzType string + +// OidcPropertyType defines the OIDC property type +type OidcPropertyType string + // FeastServiceType is the type of feast service type FeastServiceType string @@ -146,10 +215,7 @@ type FeastProviderType string // FeastServices is an interface for configuring and deploying feast services type FeastServices struct { - client.Client - Context context.Context - Scheme *runtime.Scheme - FeatureStore *feastdevv1alpha1.FeatureStore + Handler handler.FeastHandler } // RepoConfig is the Repo config. Typically loaded from feature_store.yaml. @@ -160,29 +226,45 @@ type RepoConfig struct { OfflineStore OfflineStoreConfig `yaml:"offline_store,omitempty"` OnlineStore OnlineStoreConfig `yaml:"online_store,omitempty"` Registry RegistryConfig `yaml:"registry,omitempty"` + AuthzConfig AuthzConfig `yaml:"auth,omitempty"` EntityKeySerializationVersion int `yaml:"entity_key_serialization_version,omitempty"` } // OfflineStoreConfig is the configuration that relates to reading from and writing to the Feast offline store. 
type OfflineStoreConfig struct { - Host string `yaml:"host,omitempty"` - Type OfflineConfigType `yaml:"type,omitempty"` - Port int `yaml:"port,omitempty"` + Host string `yaml:"host,omitempty"` + Type OfflineConfigType `yaml:"type,omitempty"` + Port int `yaml:"port,omitempty"` + Scheme string `yaml:"scheme,omitempty"` + Cert string `yaml:"cert,omitempty"` + DBParameters map[string]interface{} `yaml:",inline,omitempty"` } // OnlineStoreConfig is the configuration that relates to reading from and writing to the Feast online store. type OnlineStoreConfig struct { - Path string `yaml:"path,omitempty"` - Type OnlineConfigType `yaml:"type,omitempty"` + Path string `yaml:"path,omitempty"` + Type OnlineConfigType `yaml:"type,omitempty"` + Cert string `yaml:"cert,omitempty"` + DBParameters map[string]interface{} `yaml:",inline,omitempty"` } // RegistryConfig is the configuration that relates to reading from and writing to the Feast registry. type RegistryConfig struct { - Path string `yaml:"path,omitempty"` - RegistryType RegistryConfigType `yaml:"registry_type,omitempty"` + Path string `yaml:"path,omitempty"` + RegistryType RegistryConfigType `yaml:"registry_type,omitempty"` + Cert string `yaml:"cert,omitempty"` + S3AdditionalKwargs *map[string]string `yaml:"s3_additional_kwargs,omitempty"` + DBParameters map[string]interface{} `yaml:",inline,omitempty"` +} + +// AuthzConfig is the RBAC authorization configuration. 
+type AuthzConfig struct { + Type AuthzType `yaml:"type,omitempty"` + OidcParameters map[string]interface{} `yaml:",inline,omitempty"` } type deploymentSettings struct { - Command []string - TargetPort int32 + Args []string + TargetHttpPort int32 + TargetHttpsPort int32 } diff --git a/infra/feast-operator/internal/controller/services/suite_test.go b/infra/feast-operator/internal/controller/services/suite_test.go new file mode 100644 index 00000000000..5e922bc7e4a --- /dev/null +++ b/infra/feast-operator/internal/controller/services/suite_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestServices(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Services Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = feastdevv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +func testSetIsOpenShift() { + isOpenShift = true +} diff --git a/infra/feast-operator/internal/controller/services/tls.go b/infra/feast-operator/internal/controller/services/tls.go new file mode 100644 index 00000000000..03a26a9031d --- /dev/null +++ b/infra/feast-operator/internal/controller/services/tls.go @@ -0,0 +1,260 @@ +/* +Copyright 2024 Feast Community. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services + +import ( + "strconv" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func (feast *FeastServices) setTlsDefaults() error { + if err := feast.setOpenshiftTls(); err != nil { + return err + } + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + if feast.isOfflineServer() { + tlsDefaults(appliedServices.OfflineStore.Server.TLS) + } + if feast.isOnlineServer() { + tlsDefaults(appliedServices.OnlineStore.Server.TLS) + } + if feast.isRegistryServer() { + tlsDefaults(appliedServices.Registry.Local.Server.TLS) + } + if feast.isUiServer() { + tlsDefaults(appliedServices.UI.TLS) + } + return nil +} + +func (feast *FeastServices) setOpenshiftTls() error { + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + if feast.offlineOpenshiftTls() { + appliedServices.OfflineStore.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{ + Name: feast.initFeastSvc(OfflineFeastType).Name + tlsNameSuffix, + }, + } + } + if feast.onlineOpenshiftTls() { + appliedServices.OnlineStore.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{ + Name: feast.initFeastSvc(OnlineFeastType).Name + tlsNameSuffix, + }, + } + } + if feast.uiOpenshiftTls() { + appliedServices.UI.TLS = &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{ + Name: 
feast.initFeastSvc(UIFeastType).Name + tlsNameSuffix, + }, + } + } + if feast.localRegistryOpenshiftTls() { + appliedServices.Registry.Local.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{ + Name: feast.initFeastSvc(RegistryFeastType).Name + tlsNameSuffix, + }, + } + } else if remote, err := feast.remoteRegistryOpenshiftTls(); remote { + // if the remote registry reference is using openshift's service serving certificates, we can use the injected service CA bundle configMap + if appliedServices.Registry.Remote.TLS == nil { + appliedServices.Registry.Remote.TLS = &feastdevv1alpha1.TlsRemoteRegistryConfigs{ + ConfigMapRef: corev1.LocalObjectReference{ + Name: feast.initCaConfigMap().Name, + }, + CertName: "service-ca.crt", + } + } + } else if err != nil { + return err + } + return nil +} + +func (feast *FeastServices) checkOpenshiftTls() (bool, error) { + if feast.offlineOpenshiftTls() || feast.onlineOpenshiftTls() || feast.localRegistryOpenshiftTls() || feast.uiOpenshiftTls() { + return true, nil + } + return feast.remoteRegistryOpenshiftTls() +} + +func (feast *FeastServices) isOpenShiftTls(feastType FeastServiceType) (isOpenShift bool) { + switch feastType { + case OfflineFeastType: + isOpenShift = feast.offlineOpenshiftTls() + case OnlineFeastType: + isOpenShift = feast.onlineOpenshiftTls() + case RegistryFeastType: + isOpenShift = feast.localRegistryOpenshiftTls() + case UIFeastType: + isOpenShift = feast.uiOpenshiftTls() + } + + return +} + +func (feast *FeastServices) getTlsConfigs(feastType FeastServiceType) *feastdevv1alpha1.TlsConfigs { + if serviceConfigs := feast.getServerConfigs(feastType); serviceConfigs != nil { + return serviceConfigs.TLS + } + return nil +} + +// True if running in an openshift cluster and Tls not configured in the service Spec +func (feast *FeastServices) offlineOpenshiftTls() bool { + return isOpenShift && + feast.isOfflineServer() && 
feast.Handler.FeatureStore.Spec.Services.OfflineStore.Server.TLS == nil +} + +// True if running in an openshift cluster and Tls not configured in the service Spec +func (feast *FeastServices) onlineOpenshiftTls() bool { + return isOpenShift && + feast.isOnlineServer() && + (feast.Handler.FeatureStore.Spec.Services == nil || + feast.Handler.FeatureStore.Spec.Services.OnlineStore == nil || + feast.Handler.FeatureStore.Spec.Services.OnlineStore.Server == nil || + feast.Handler.FeatureStore.Spec.Services.OnlineStore.Server.TLS == nil) +} + +// True if running in an openshift cluster and Tls not configured in the service Spec +func (feast *FeastServices) uiOpenshiftTls() bool { + return isOpenShift && + feast.isUiServer() && feast.Handler.FeatureStore.Spec.Services.UI.TLS == nil +} + +// True if running in an openshift cluster and Tls not configured in the service Spec +func (feast *FeastServices) localRegistryOpenshiftTls() bool { + return isOpenShift && + feast.isRegistryServer() && feast.Handler.FeatureStore.Spec.Services.Registry.Local.Server.TLS == nil +} + +// True if running in an openshift cluster, and using a remote registry in the same cluster, with no remote Tls set in the service Spec +func (feast *FeastServices) remoteRegistryOpenshiftTls() (bool, error) { + if isOpenShift && feast.isRemoteRegistry() { + remoteFeast, err := feast.getRemoteRegistryFeastHandler() + if err != nil { + return false, err + } + return (remoteFeast != nil && remoteFeast.localRegistryOpenshiftTls() && + feast.Handler.FeatureStore.Spec.Services.Registry.Remote.TLS == nil), + nil + } + return false, nil +} + +func (feast *FeastServices) localRegistryTls() bool { + return localRegistryTls(feast.Handler.FeatureStore) +} + +func (feast *FeastServices) remoteRegistryTls() bool { + return remoteRegistryTls(feast.Handler.FeatureStore) +} + +func (feast *FeastServices) mountRegistryClientTls(podSpec *corev1.PodSpec) { + if podSpec != nil { + if feast.localRegistryTls() { + 
feast.mountTlsConfig(RegistryFeastType, podSpec) + } else if feast.remoteRegistryTls() { + mountTlsRemoteRegistryConfig(podSpec, + feast.Handler.FeatureStore.Status.Applied.Services.Registry.Remote.TLS) + } + } +} + +func (feast *FeastServices) mountTlsConfigs(podSpec *corev1.PodSpec) { + // how deal w/ client deployment tls mounts when the time comes? new function? + feast.mountRegistryClientTls(podSpec) + feast.mountTlsConfig(OfflineFeastType, podSpec) + feast.mountTlsConfig(OnlineFeastType, podSpec) + feast.mountTlsConfig(UIFeastType, podSpec) +} + +func (feast *FeastServices) mountTlsConfig(feastType FeastServiceType, podSpec *corev1.PodSpec) { + tls := feast.getTlsConfigs(feastType) + if tls.IsTLS() && podSpec != nil { + volName := string(feastType) + tlsNameSuffix + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: tls.SecretRef.Name, + }, + }, + }) + if i, container := getContainerByType(feastType, *podSpec); container != nil { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: GetTlsPath(feastType), + ReadOnly: true, + }) + } + } +} + +func mountTlsRemoteRegistryConfig(podSpec *corev1.PodSpec, tls *feastdevv1alpha1.TlsRemoteRegistryConfigs) { + if tls != nil { + volName := string(RegistryFeastType) + tlsNameSuffix + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: tls.ConfigMapRef, + }, + }, + }) + for i := range podSpec.Containers { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: volName, + MountPath: GetTlsPath(RegistryFeastType), + ReadOnly: true, + }) + } + } +} + +func getPortStr(tls *feastdevv1alpha1.TlsConfigs) string { + if tls.IsTLS() { + return strconv.Itoa(HttpsPort) 
+ } + return strconv.Itoa(HttpPort) +} + +func tlsDefaults(tls *feastdevv1alpha1.TlsConfigs) { + if tls.IsTLS() { + if len(tls.SecretKeyNames.TlsCrt) == 0 { + tls.SecretKeyNames.TlsCrt = "tls.crt" + } + if len(tls.SecretKeyNames.TlsKey) == 0 { + tls.SecretKeyNames.TlsKey = "tls.key" + } + } +} + +func localRegistryTls(featureStore *feastdevv1alpha1.FeatureStore) bool { + return IsRegistryServer(featureStore) && featureStore.Status.Applied.Services.Registry.Local.Server.TLS.IsTLS() +} + +func remoteRegistryTls(featureStore *feastdevv1alpha1.FeatureStore) bool { + return isRemoteRegistry(featureStore) && featureStore.Status.Applied.Services.Registry.Remote.TLS != nil +} + +func GetTlsPath(feastType FeastServiceType) string { + return tlsPath + string(feastType) + "/" +} diff --git a/infra/feast-operator/internal/controller/services/tls_test.go b/infra/feast-operator/internal/controller/services/tls_test.go new file mode 100644 index 00000000000..04925ff02ee --- /dev/null +++ b/infra/feast-operator/internal/controller/services/tls_test.go @@ -0,0 +1,330 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" +) + +// test tls functions directly +var _ = Describe("TLS Config", func() { + Context("When reconciling a FeatureStore", func() { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(feastdevv1alpha1.AddToScheme(scheme)) + + secretKeyNames := feastdevv1alpha1.SecretKeyNames{ + TlsCrt: "tls.crt", + TlsKey: "tls.key", + } + + It("should set default TLS configs", func() { + By("Having the created resource") + + // registry server w/o tls + feast := FeastServices{ + Handler: handler.FeastHandler{ + FeatureStore: minimalFeatureStore(), + Scheme: scheme, + }, + } + feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + }, + } + err := feast.ApplyDefaults() + Expect(err).ToNot(HaveOccurred()) + + tls := feast.getTlsConfigs(RegistryFeastType) + Expect(tls).To(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + Expect(getPortStr(tls)).To(Equal("80")) + + Expect(feast.remoteRegistryTls()).To(BeFalse()) + Expect(feast.localRegistryTls()).To(BeFalse()) + Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(OnlineFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(RegistryFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(UIFeastType)).To(BeFalse()) + + openshiftTls, err := feast.checkOpenshiftTls() + Expect(err).ToNot(HaveOccurred()) + Expect(openshiftTls).To(BeFalse()) + + // registry service w/ openshift tls + testSetIsOpenShift() + feast.Handler.FeatureStore 
= minimalFeatureStore() + feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{}, + }, + }, + } + err = feast.ApplyDefaults() + Expect(err).ToNot(HaveOccurred()) + + tls = feast.getTlsConfigs(OfflineFeastType) + Expect(tls).To(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + tls = feast.getTlsConfigs(OnlineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + tls = feast.getTlsConfigs(UIFeastType) + Expect(tls).To(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + tls = feast.getTlsConfigs(RegistryFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + Expect(tls.SecretKeyNames).To(Equal(secretKeyNames)) + Expect(getPortStr(tls)).To(Equal("443")) + Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) + + Expect(feast.remoteRegistryTls()).To(BeFalse()) + Expect(feast.localRegistryTls()).To(BeTrue()) + Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(OnlineFeastType)).To(BeTrue()) + Expect(feast.isOpenShiftTls(UIFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(RegistryFeastType)).To(BeTrue()) + + openshiftTls, err = feast.checkOpenshiftTls() + Expect(err).ToNot(HaveOccurred()) + Expect(openshiftTls).To(BeTrue()) + + // all services w/ openshift tls + feast.Handler.FeatureStore = minimalFeatureStoreWithAllServers() + err = feast.ApplyDefaults() + Expect(err).ToNot(HaveOccurred()) + + repoConfig, err := getClientRepoConfig(feast.Handler.FeatureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.OfflineStore.Port).To(Equal(HttpsPort)) + Expect(repoConfig.OfflineStore.Scheme).To(Equal(HttpsScheme)) + Expect(repoConfig.OfflineStore.Cert).To(ContainSubstring(string(OfflineFeastType))) + Expect(repoConfig.OnlineStore.Cert).To(ContainSubstring(string(OnlineFeastType))) + 
Expect(repoConfig.Registry.Cert).To(ContainSubstring(string(RegistryFeastType))) + + tls = feast.getTlsConfigs(OfflineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + Expect(tls.SecretRef).NotTo(BeNil()) + Expect(tls.SecretRef.Name).To(Equal("feast-test-offline-tls")) + tls = feast.getTlsConfigs(OnlineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + Expect(tls.SecretRef).NotTo(BeNil()) + Expect(tls.SecretRef.Name).To(Equal("feast-test-online-tls")) + tls = feast.getTlsConfigs(RegistryFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.SecretRef).NotTo(BeNil()) + Expect(tls.SecretRef.Name).To(Equal("feast-test-registry-tls")) + Expect(tls.SecretKeyNames).To(Equal(secretKeyNames)) + Expect(tls.IsTLS()).To(BeTrue()) + tls = feast.getTlsConfigs(UIFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.SecretRef).NotTo(BeNil()) + Expect(tls.SecretRef.Name).To(Equal("feast-test-ui-tls")) + Expect(tls.SecretKeyNames).To(Equal(secretKeyNames)) + Expect(tls.IsTLS()).To(BeTrue()) + + Expect(feast.remoteRegistryTls()).To(BeFalse()) + Expect(feast.localRegistryTls()).To(BeTrue()) + Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeTrue()) + Expect(feast.isOpenShiftTls(OnlineFeastType)).To(BeTrue()) + Expect(feast.isOpenShiftTls(RegistryFeastType)).To(BeTrue()) + Expect(feast.isOpenShiftTls(UIFeastType)).To(BeTrue()) + openshiftTls, err = feast.checkOpenshiftTls() + Expect(err).ToNot(HaveOccurred()) + Expect(openshiftTls).To(BeTrue()) + + // check k8s deployment objects + feastDeploy := feast.initFeastDeploy() + err = feast.setDeployment(feastDeploy) + Expect(err).ToNot(HaveOccurred()) + Expect(feastDeploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + Expect(feastDeploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + Expect(feastDeploy.Spec.Template.Spec.Containers[0].Command).To(ContainElements(ContainSubstring("--key"))) + 
Expect(feastDeploy.Spec.Template.Spec.Containers[1].Command).To(ContainElements(ContainSubstring("--key"))) + Expect(feastDeploy.Spec.Template.Spec.Containers[2].Command).To(ContainElements(ContainSubstring("--key"))) + Expect(feastDeploy.Spec.Template.Spec.Containers[3].Command).To(ContainElements(ContainSubstring("--key"))) + Expect(feastDeploy.Spec.Template.Spec.Volumes).To(HaveLen(5)) + + // registry service w/ tls and in an openshift cluster + feast.Handler.FeatureStore = minimalFeatureStore() + feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{}, + }, + }, + UI: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{}, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{}, + SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + TlsCrt: "test.crt", + }, + }, + }, + }, + }, + } + err = feast.ApplyDefaults() + Expect(err).ToNot(HaveOccurred()) + + tls = feast.getTlsConfigs(OfflineFeastType) + Expect(tls).To(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + tls = feast.getTlsConfigs(OnlineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + tls = feast.getTlsConfigs(UIFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + tls = feast.getTlsConfigs(RegistryFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + Expect(tls.SecretKeyNames).NotTo(Equal(secretKeyNames)) + Expect(getPortStr(tls)).To(Equal("443")) + Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) + Expect(feast.remoteRegistryTls()).To(BeFalse()) + Expect(feast.localRegistryTls()).To(BeTrue()) + Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) + 
Expect(feast.isOpenShiftTls(OnlineFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(UIFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(RegistryFeastType)).To(BeFalse()) + openshiftTls, err = feast.checkOpenshiftTls() + Expect(err).ToNot(HaveOccurred()) + Expect(openshiftTls).To(BeFalse()) + + // all services w/ tls and in an openshift cluster + feast.Handler.FeatureStore = minimalFeatureStoreWithAllServers() + disable := true + feast.Handler.FeatureStore.Spec.Services.OnlineStore = &feastdevv1alpha1.OnlineStore{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{ + Disable: &disable, + }, + }, + } + feast.Handler.FeatureStore.Spec.Services.UI.TLS = &feastdevv1alpha1.TlsConfigs{ + Disable: &disable, + } + feast.Handler.FeatureStore.Spec.Services.Registry = &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Server: &feastdevv1alpha1.ServerConfigs{ + TLS: &feastdevv1alpha1.TlsConfigs{ + Disable: &disable, + }, + }, + }, + } + err = feast.ApplyDefaults() + Expect(err).ToNot(HaveOccurred()) + + repoConfig, err = getClientRepoConfig(feast.Handler.FeatureStore, emptyMockExtractConfigFromSecret) + Expect(err).NotTo(HaveOccurred()) + Expect(repoConfig.OfflineStore.Port).To(Equal(HttpsPort)) + Expect(repoConfig.OfflineStore.Scheme).To(Equal(HttpsScheme)) + Expect(repoConfig.OfflineStore.Cert).To(ContainSubstring(string(OfflineFeastType))) + Expect(repoConfig.OnlineStore.Cert).NotTo(ContainSubstring(string(OnlineFeastType))) + Expect(repoConfig.Registry.Cert).NotTo(ContainSubstring(string(RegistryFeastType))) + + tls = feast.getTlsConfigs(OfflineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeTrue()) + Expect(tls.SecretKeyNames).To(Equal(secretKeyNames)) + tls = feast.getTlsConfigs(OnlineFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + Expect(tls.SecretKeyNames).NotTo(Equal(secretKeyNames)) + tls = feast.getTlsConfigs(UIFeastType) + Expect(tls).NotTo(BeNil()) + 
Expect(tls.IsTLS()).To(BeFalse()) + Expect(tls.SecretKeyNames).NotTo(Equal(secretKeyNames)) + tls = feast.getTlsConfigs(RegistryFeastType) + Expect(tls).NotTo(BeNil()) + Expect(tls.IsTLS()).To(BeFalse()) + Expect(tls.SecretKeyNames).NotTo(Equal(secretKeyNames)) + Expect(getPortStr(tls)).To(Equal("80")) + Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) + + Expect(feast.remoteRegistryTls()).To(BeFalse()) + Expect(feast.localRegistryTls()).To(BeFalse()) + Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeTrue()) + Expect(feast.isOpenShiftTls(OnlineFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(UIFeastType)).To(BeFalse()) + Expect(feast.isOpenShiftTls(RegistryFeastType)).To(BeFalse()) + openshiftTls, err = feast.checkOpenshiftTls() + Expect(err).ToNot(HaveOccurred()) + Expect(openshiftTls).To(BeTrue()) + + // check k8s service objects + offlineSvc := feast.initFeastSvc(OfflineFeastType) + Expect(offlineSvc.Annotations).To(BeEmpty()) + err = feast.setService(offlineSvc, OfflineFeastType) + Expect(err).ToNot(HaveOccurred()) + Expect(offlineSvc.Annotations).NotTo(BeEmpty()) + Expect(offlineSvc.Spec.Ports[0].Name).To(Equal(HttpsScheme)) + + onlineSvc := feast.initFeastSvc(OnlineFeastType) + err = feast.setService(onlineSvc, OnlineFeastType) + Expect(err).ToNot(HaveOccurred()) + Expect(onlineSvc.Annotations).To(BeEmpty()) + Expect(onlineSvc.Spec.Ports[0].Name).To(Equal(HttpScheme)) + + uiSvc := feast.initFeastSvc(UIFeastType) + err = feast.setService(uiSvc, UIFeastType) + Expect(err).ToNot(HaveOccurred()) + Expect(uiSvc.Annotations).To(BeEmpty()) + Expect(uiSvc.Spec.Ports[0].Name).To(Equal(HttpScheme)) + + // check k8s deployment objects + feastDeploy = feast.initFeastDeploy() + err = feast.setDeployment(feastDeploy) + Expect(err).ToNot(HaveOccurred()) + Expect(feastDeploy.Spec.Template.Spec.Containers).To(HaveLen(4)) + Expect(GetOfflineContainer(*feastDeploy)).NotTo(BeNil()) + Expect(feastDeploy.Spec.Template.Spec.Volumes).To(HaveLen(2)) + + 
Expect(GetRegistryContainer(*feastDeploy).Command).NotTo(ContainElements(ContainSubstring("--key"))) + Expect(GetRegistryContainer(*feastDeploy).VolumeMounts).To(HaveLen(1)) + Expect(GetOfflineContainer(*feastDeploy).Command).To(ContainElements(ContainSubstring("--key"))) + Expect(GetOfflineContainer(*feastDeploy).VolumeMounts).To(HaveLen(2)) + Expect(GetOnlineContainer(*feastDeploy).Command).NotTo(ContainElements(ContainSubstring("--key"))) + Expect(GetOnlineContainer(*feastDeploy).VolumeMounts).To(HaveLen(1)) + Expect(GetUIContainer(*feastDeploy).Command).NotTo(ContainElements(ContainSubstring("--key"))) + Expect(GetUIContainer(*feastDeploy).VolumeMounts).To(HaveLen(1)) + + }) + }) +}) diff --git a/infra/feast-operator/internal/controller/services/util.go b/infra/feast-operator/internal/controller/services/util.go new file mode 100644 index 00000000000..41f3e837157 --- /dev/null +++ b/infra/feast-operator/internal/controller/services/util.go @@ -0,0 +1,470 @@ +package services + +import ( + "fmt" + "os" + "reflect" + "slices" + "strings" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var isOpenShift = false + +func IsRegistryServer(featureStore *feastdevv1alpha1.FeatureStore) bool { + return IsLocalRegistry(featureStore) && featureStore.Status.Applied.Services.Registry.Local.Server != nil +} + +func IsLocalRegistry(featureStore *feastdevv1alpha1.FeatureStore) bool { + appliedServices := featureStore.Status.Applied.Services + return appliedServices != nil && appliedServices.Registry != nil && 
appliedServices.Registry.Local != nil +} + +func isRemoteRegistry(featureStore *feastdevv1alpha1.FeatureStore) bool { + appliedServices := featureStore.Status.Applied.Services + return appliedServices != nil && appliedServices.Registry != nil && appliedServices.Registry.Remote != nil +} + +func hasPvcConfig(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) (*feastdevv1alpha1.PvcConfig, bool) { + var pvcConfig *feastdevv1alpha1.PvcConfig + services := featureStore.Status.Applied.Services + if services != nil { + switch feastType { + case OnlineFeastType: + if services.OnlineStore != nil && services.OnlineStore.Persistence != nil && + services.OnlineStore.Persistence.FilePersistence != nil { + pvcConfig = services.OnlineStore.Persistence.FilePersistence.PvcConfig + } + case OfflineFeastType: + if services.OfflineStore != nil && services.OfflineStore.Persistence != nil && + services.OfflineStore.Persistence.FilePersistence != nil { + pvcConfig = services.OfflineStore.Persistence.FilePersistence.PvcConfig + } + case RegistryFeastType: + if IsLocalRegistry(featureStore) && services.Registry.Local.Persistence != nil && + services.Registry.Local.Persistence.FilePersistence != nil { + pvcConfig = services.Registry.Local.Persistence.FilePersistence.PvcConfig + } + } + } + return pvcConfig, pvcConfig != nil +} + +func shouldCreatePvc(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) (*feastdevv1alpha1.PvcCreate, bool) { + if pvcConfig, ok := hasPvcConfig(featureStore, feastType); ok { + return pvcConfig.Create, pvcConfig.Create != nil + } + return nil, false +} + +func shouldMountEmptyDir(featureStore *feastdevv1alpha1.FeatureStore) bool { + for _, feastType := range feastServerTypes { + if _, ok := hasPvcConfig(featureStore, feastType); !ok { + return true + } + } + return false +} + +func getOfflineMountPath(featureStore *feastdevv1alpha1.FeatureStore) string { + if pvcConfig, ok := hasPvcConfig(featureStore, OfflineFeastType); 
ok { + return pvcConfig.MountPath + } + return EphemeralPath +} + +func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { + // overwrite status.applied with every reconcile + cr.Spec.DeepCopyInto(&cr.Status.Applied) + cr.Status.FeastVersion = feastversion.FeastVersion + + applied := &cr.Status.Applied + if applied.FeastProjectDir == nil { + applied.FeastProjectDir = &feastdevv1alpha1.FeastProjectDir{ + Init: &feastdevv1alpha1.FeastInitOptions{}, + } + } + if applied.Services == nil { + applied.Services = &feastdevv1alpha1.FeatureStoreServices{} + } + services := applied.Services + + if services.Registry != nil { + // if remote registry not set, proceed w/ local registry defaults + if services.Registry.Remote == nil { + // if local registry not set, apply an empty pointer struct + if services.Registry.Local == nil { + services.Registry.Local = &feastdevv1alpha1.LocalRegistryConfig{} + } + if services.Registry.Local.Persistence == nil { + services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{} + } + + if services.Registry.Local.Persistence.DBPersistence == nil { + if services.Registry.Local.Persistence.FilePersistence == nil { + services.Registry.Local.Persistence.FilePersistence = &feastdevv1alpha1.RegistryFilePersistence{} + } + + if len(services.Registry.Local.Persistence.FilePersistence.Path) == 0 { + services.Registry.Local.Persistence.FilePersistence.Path = defaultRegistryPath(cr) + } + + ensurePVCDefaults(services.Registry.Local.Persistence.FilePersistence.PvcConfig, RegistryFeastType) + } + + if services.Registry.Local.Server != nil { + setDefaultCtrConfigs(&services.Registry.Local.Server.ContainerConfigs.DefaultCtrConfigs) + } + } else if services.Registry.Remote.FeastRef != nil && len(services.Registry.Remote.FeastRef.Namespace) == 0 { + services.Registry.Remote.FeastRef.Namespace = cr.Namespace + } + } + + if services.OfflineStore != nil { + if services.OfflineStore.Persistence == nil { + services.OfflineStore.Persistence = 
&feastdevv1alpha1.OfflineStorePersistence{} + } + + if services.OfflineStore.Persistence.DBPersistence == nil { + if services.OfflineStore.Persistence.FilePersistence == nil { + services.OfflineStore.Persistence.FilePersistence = &feastdevv1alpha1.OfflineStoreFilePersistence{} + } + + if len(services.OfflineStore.Persistence.FilePersistence.Type) == 0 { + services.OfflineStore.Persistence.FilePersistence.Type = string(OfflineFilePersistenceDaskConfigType) + } + + ensurePVCDefaults(services.OfflineStore.Persistence.FilePersistence.PvcConfig, OfflineFeastType) + } + + if services.OfflineStore.Server != nil { + setDefaultCtrConfigs(&services.OfflineStore.Server.ContainerConfigs.DefaultCtrConfigs) + } + } + + // default to onlineStore service deployment + if services.OnlineStore == nil { + services.OnlineStore = &feastdevv1alpha1.OnlineStore{} + } + if services.OnlineStore.Persistence == nil { + services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{} + } + + if services.OnlineStore.Persistence.DBPersistence == nil { + if services.OnlineStore.Persistence.FilePersistence == nil { + services.OnlineStore.Persistence.FilePersistence = &feastdevv1alpha1.OnlineStoreFilePersistence{} + } + + if len(services.OnlineStore.Persistence.FilePersistence.Path) == 0 { + services.OnlineStore.Persistence.FilePersistence.Path = defaultOnlineStorePath(cr) + } + + ensurePVCDefaults(services.OnlineStore.Persistence.FilePersistence.PvcConfig, OnlineFeastType) + } + + if services.OnlineStore.Server == nil { + services.OnlineStore.Server = &feastdevv1alpha1.ServerConfigs{} + } + setDefaultCtrConfigs(&services.OnlineStore.Server.ContainerConfigs.DefaultCtrConfigs) + + if services.UI != nil { + setDefaultCtrConfigs(&services.UI.ContainerConfigs.DefaultCtrConfigs) + } +} + +func setDefaultCtrConfigs(defaultConfigs *feastdevv1alpha1.DefaultCtrConfigs) { + if defaultConfigs.Image == nil { + img := getFeatureServerImage() + defaultConfigs.Image = &img + } +} + +func 
getFeatureServerImage() string { + if img, exists := os.LookupEnv(feastServerImageVar); exists { + return img + } + return DefaultImage +} + +func checkOfflineStoreFilePersistenceType(value string) error { + if slices.Contains(feastdevv1alpha1.ValidOfflineStoreFilePersistenceTypes, value) { + return nil + } + return fmt.Errorf("invalid file type %s for offline store", value) +} + +func ensureRequestedStorage(resources *corev1.VolumeResourceRequirements, requestedStorage string) { + if resources.Requests == nil { + resources.Requests = corev1.ResourceList{} + } + if _, ok := resources.Requests[corev1.ResourceStorage]; !ok { + resources.Requests[corev1.ResourceStorage] = resource.MustParse(requestedStorage) + } +} + +func ensurePVCDefaults(pvc *feastdevv1alpha1.PvcConfig, feastType FeastServiceType) { + if pvc != nil { + var storageRequest string + switch feastType { + case OnlineFeastType: + storageRequest = DefaultOnlineStorageRequest + case OfflineFeastType: + storageRequest = DefaultOfflineStorageRequest + case RegistryFeastType: + storageRequest = DefaultRegistryStorageRequest + } + if pvc.Create != nil { + ensureRequestedStorage(&pvc.Create.Resources, storageRequest) + if pvc.Create.AccessModes == nil { + pvc.Create.AccessModes = DefaultPVCAccessModes + } + } + } +} + +func defaultOnlineStorePath(featureStore *feastdevv1alpha1.FeatureStore) string { + if _, ok := hasPvcConfig(featureStore, OnlineFeastType); ok { + return DefaultOnlineStorePath + } + // if pvc not set, use the ephemeral mount path. + return EphemeralPath + "/" + DefaultOnlineStorePath +} + +func defaultRegistryPath(featureStore *feastdevv1alpha1.FeatureStore) string { + if _, ok := hasPvcConfig(featureStore, RegistryFeastType); ok { + return DefaultRegistryPath + } + // if pvc not set, use the ephemeral mount path. 
+ return EphemeralPath + "/" + DefaultRegistryPath +} + +func checkOfflineStoreDBStorePersistenceType(value string) error { + if slices.Contains(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes, value) { + return nil + } + return fmt.Errorf("invalid DB store type %s for offline store", value) +} + +func checkOnlineStoreDBStorePersistenceType(value string) error { + if slices.Contains(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes, value) { + return nil + } + return fmt.Errorf("invalid DB store type %s for online store", value) +} + +func checkRegistryDBStorePersistenceType(value string) error { + if slices.Contains(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes, value) { + return nil + } + return fmt.Errorf("invalid DB store type %s for registry", value) +} + +func (feast *FeastServices) getSecret(secretRef string) (*corev1.Secret, error) { + logger := log.FromContext(feast.Handler.Context) + secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: secretRef, Namespace: feast.Handler.FeatureStore.Namespace}} + objectKey := client.ObjectKeyFromObject(secret) + if err := feast.Handler.Client.Get(feast.Handler.Context, objectKey, secret); err != nil { + if apierrors.IsNotFound(err) { + logger.Error(err, "invalid secret "+secretRef+" for offline store") + } + return nil, err + } + + return secret, nil +} + +// Function to check if a struct has a specific field or field tag and sets the value in the field if empty +func hasAttrib(s interface{}, fieldName string, value interface{}) (bool, error) { + val := reflect.ValueOf(s) + + // Check that the object is a pointer so we can modify it + if val.Kind() != reflect.Ptr || val.IsNil() { + return false, fmt.Errorf("expected a pointer to struct, got %v", val.Kind()) + } + + val = val.Elem() + + // Loop through the fields and check the tag + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + fieldType := val.Type().Field(i) + + tagVal := fieldType.Tag.Get("yaml") + + // Remove other 
metadata if exists + commaIndex := strings.Index(tagVal, ",") + + if commaIndex != -1 { + tagVal = tagVal[:commaIndex] + } + + // Check if the field name or the tag value matches the one we're looking for + if strings.EqualFold(fieldType.Name, fieldName) || strings.EqualFold(tagVal, fieldName) { + + // Ensure the field is settable + if !field.CanSet() { + return false, fmt.Errorf("cannot set field %s", fieldName) + } + + // Check if the field is empty (zero value) + if field.IsZero() { + // Set the field value only if it's empty + field.Set(reflect.ValueOf(value)) + } + + return true, nil + } + } + + return false, nil +} + +func CopyMap(original map[string]interface{}) map[string]interface{} { + // Create a new map to store the copy + newCopy := make(map[string]interface{}) + + // Loop through the original map and copy each key-value pair + for key, value := range original { + newCopy[key] = value + } + + return newCopy +} + +// IsOpenShift is a global flag that can be safely called across reconciliation cycles, defined at the controller manager start. +func IsOpenShift() bool { + return isOpenShift +} + +// SetIsOpenShift sets the global flag isOpenShift by the controller manager. +// We don't need to keep fetching the API every reconciliation cycle that we need to know about the platform. 
+func SetIsOpenShift(cfg *rest.Config) { + if cfg == nil { + panic("Rest Config struct is nil, impossible to get cluster information") + } + // adapted from https://github.com/RHsyseng/operator-utils/blob/a226fabb2226a313dd3a16591c5579ebcd8a74b0/internal/platform/platform_versioner.go#L95 + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + panic(fmt.Sprintf("Impossible to get new client for config when fetching cluster information: %s", err)) + } + apiList, err := client.ServerGroups() + if err != nil { + panic(fmt.Sprintf("issue occurred while fetching ServerGroups: %s", err)) + } + + for _, v := range apiList.Groups { + if v.Name == "route.openshift.io" { + isOpenShift = true + break + } + } +} + +func missingOidcSecretProperty(property OidcPropertyType) error { + return fmt.Errorf(OidcMissingSecretError, property) +} + +// getEnvVar returns the position of the EnvVar found by name +func getEnvVar(envName string, env []corev1.EnvVar) int { + for pos, v := range env { + if v.Name == envName { + return pos + } + } + return -1 +} + +// envOverride replaces or appends the provided EnvVar to the collection +func envOverride(dst, src []corev1.EnvVar) []corev1.EnvVar { + for _, cre := range src { + pos := getEnvVar(cre.Name, dst) + if pos != -1 { + dst[pos] = cre + } else { + dst = append(dst, cre) + } + } + return dst +} + +func GetRegistryContainer(deployment appsv1.Deployment) *corev1.Container { + _, container := getContainerByType(RegistryFeastType, deployment.Spec.Template.Spec) + return container +} + +func GetOfflineContainer(deployment appsv1.Deployment) *corev1.Container { + _, container := getContainerByType(OfflineFeastType, deployment.Spec.Template.Spec) + return container +} + +func GetUIContainer(deployment appsv1.Deployment) *corev1.Container { + _, container := getContainerByType(UIFeastType, deployment.Spec.Template.Spec) + return container +} + +func GetOnlineContainer(deployment appsv1.Deployment) *corev1.Container { + _, 
container := getContainerByType(OnlineFeastType, deployment.Spec.Template.Spec) + return container +} + +func getContainerByType(feastType FeastServiceType, podSpec corev1.PodSpec) (int, *corev1.Container) { + for i, c := range podSpec.Containers { + if c.Name == string(feastType) { + return i, &c + } + } + return -1, nil +} + +func GetRegistryVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { + return getVolumeByType(RegistryFeastType, featureStore, volumes) +} + +func GetOnlineVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { + return getVolumeByType(OnlineFeastType, featureStore, volumes) +} + +func GetOfflineVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { + return getVolumeByType(OfflineFeastType, featureStore, volumes) +} + +func getVolumeByType(feastType FeastServiceType, featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { + for _, v := range volumes { + if v.Name == GetFeastServiceName(featureStore, feastType) { + return &v + } + } + return nil +} + +func GetRegistryVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { + return getVolumeMountByType(RegistryFeastType, featureStore, volumeMounts) +} + +func GetOnlineVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { + return getVolumeMountByType(OnlineFeastType, featureStore, volumeMounts) +} + +func GetOfflineVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { + return getVolumeMountByType(OfflineFeastType, featureStore, volumeMounts) +} + +func getVolumeMountByType(feastType FeastServiceType, featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { + for _, vm := range volumeMounts { + if vm.Name == 
GetFeastServiceName(featureStore, feastType) { + return &vm + } + } + return nil +} diff --git a/infra/feast-operator/internal/controller/suite_test.go b/infra/feast-operator/internal/controller/suite_test.go index 57091df5c00..51208d6dbb0 100644 --- a/infra/feast-operator/internal/controller/suite_test.go +++ b/infra/feast-operator/internal/controller/suite_test.go @@ -26,20 +26,18 @@ import ( . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment @@ -66,16 +64,14 @@ var _ = BeforeSuite(func() { fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), } - var err error - // cfg is defined in this file globally. - cfg, err = testEnv.Start() + cfg, err := testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) err = feastdevv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) diff --git a/infra/feast-operator/test/api/featurestore_types_test.go b/infra/feast-operator/test/api/featurestore_types_test.go new file mode 100644 index 00000000000..12a7406e80d --- /dev/null +++ b/infra/feast-operator/test/api/featurestore_types_test.go @@ -0,0 +1,479 @@ +package api + +import ( + "context" + "fmt" + "strings" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/log" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func createFeatureStore() *feastdevv1alpha1.FeatureStore { + return &feastdevv1alpha1.FeatureStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: namespaceName, + }, + Spec: feastdevv1alpha1.FeatureStoreSpec{ + FeastProject: "test_project", + }, + } +} + +func attemptInvalidCreationAndAsserts(ctx context.Context, featurestore *feastdevv1alpha1.FeatureStore, matcher string) { + By("Creating the resource") + logger := log.FromContext(ctx) + logger.Info("Creating", "FeatureStore", featurestore) + err := k8sClient.Create(ctx, featurestore) + logger.Info("Got", "err", err) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring(matcher)) +} + +func onlineStoreWithAbsolutePathForPvc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: "/data/online_store.db", + PvcConfig: &feastdevv1alpha1.PvcConfig{}, + }, + }, + }, + } + return fsCopy +} +func onlineStoreWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: 
&feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: "data/online_store.db", + }, + }, + }, + } + return fsCopy +} + +func onlineStoreWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + Path: path, + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: "/data/online", + }, + }, + }, + }, + } + return fsCopy +} + +func offlineStoreWithUnmanagedFileType(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + Type: "unmanaged", + }, + }, + }, + } + return fsCopy +} + +func registryWithAbsolutePathForPvc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: "/data/registry.db", + PvcConfig: &feastdevv1alpha1.PvcConfig{}, + }}, + }, + }, + } + return fsCopy +} +func registryWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + 
Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: "data/online_store.db", + }, + }, + }, + }, + } + return fsCopy +} +func registryWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: path, + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: "/data/registry", + }, + }, + }, + }, + }, + } + return fsCopy +} +func registryWithS3AdditionalKeywordsForFile(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: "/data/online_store.db", + S3AdditionalKwargs: &map[string]string{}, + }, + }, + }, + }, + } + return fsCopy +} +func registryWithS3AdditionalKeywordsForGsBucket(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Path: "gs://online_store.db", + S3AdditionalKwargs: &map[string]string{}, + }, + }, + }, + }, + } + return fsCopy +} + +func pvcConfigWithNeitherRefNorCreate(featureStore *feastdevv1alpha1.FeatureStore) 
*feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1alpha1.PvcConfig{}, + }, + }, + }, + } + return fsCopy +} +func pvcConfigWithBothRefAndCreate(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Ref: &corev1.LocalObjectReference{ + Name: "pvc", + }, + Create: &feastdevv1alpha1.PvcCreate{}, + }, + }, + }, + }, + } + return fsCopy +} + +func pvcConfigWithNoResources(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: "/data/offline", + }, + }, + }, + }, + OnlineStore: &feastdevv1alpha1.OnlineStore{ + Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + PvcConfig: &feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: "/data/online", + }, + }, + }, + }, + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + PvcConfig: 
&feastdevv1alpha1.PvcConfig{ + Create: &feastdevv1alpha1.PvcCreate{}, + MountPath: "/data/registry", + }, + }, + }, + }, + }, + } + return fsCopy +} + +func pvcConfigWithResources(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := pvcConfigWithNoResources(featureStore) + fsCopy.Spec.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources = corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10Gi"), + }, + } + fsCopy.Spec.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.Resources = corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + } + fsCopy.Spec.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.Resources = corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("500Mi"), + }, + } + return fsCopy +} + +func authzConfigWithKubernetes(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + if fsCopy.Spec.AuthzConfig == nil { + fsCopy.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{} + } + fsCopy.Spec.AuthzConfig.KubernetesAuthz = &feastdevv1alpha1.KubernetesAuthz{ + Roles: []string{}, + } + return fsCopy +} +func authzConfigWithOidc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + if fsCopy.Spec.AuthzConfig == nil { + fsCopy.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{} + } + fsCopy.Spec.AuthzConfig.OidcAuthz = &feastdevv1alpha1.OidcAuthz{} + return fsCopy +} + +func onlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OnlineStore: &feastdevv1alpha1.OnlineStore{ + 
Persistence: &feastdevv1alpha1.OnlineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OnlineStoreDBStorePersistence{ + Type: dbPersistenceType, + }, + }, + }, + } + return fsCopy +} + +func offlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + OfflineStore: &feastdevv1alpha1.OfflineStore{ + Persistence: &feastdevv1alpha1.OfflineStorePersistence{ + DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ + Type: dbPersistenceType, + }, + }, + }, + } + return fsCopy +} + +func registryStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { + fsCopy := featureStore.DeepCopy() + fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ + Registry: &feastdevv1alpha1.Registry{ + Local: &feastdevv1alpha1.LocalRegistryConfig{ + Persistence: &feastdevv1alpha1.RegistryPersistence{ + DBPersistence: &feastdevv1alpha1.RegistryDBStorePersistence{ + Type: dbPersistenceType, + }, + }, + }, + }, + } + return fsCopy +} + +func quotedSlice(stringSlice []string) string { + quotedSlice := make([]string, len(stringSlice)) + + for i, str := range stringSlice { + quotedSlice[i] = fmt.Sprintf("\"%s\"", str) + } + + return strings.Join(quotedSlice, ", ") +} + +const resourceName = "test-resource" +const namespaceName = "default" + +var typeNamespacedName = types.NamespacedName{ + Name: resourceName, + Namespace: "default", +} + +func initContext() (context.Context, *feastdevv1alpha1.FeatureStore) { + ctx := context.Background() + + featurestore := createFeatureStore() + + BeforeEach(func() { + By("verifying the custom resource FeatureStore is not there") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + Expect(err != nil && errors.IsNotFound(err)).To(BeTrue()) + }) + AfterEach(func() { + By("verifying 
the custom resource FeatureStore is not there") + err := k8sClient.Get(ctx, typeNamespacedName, featurestore) + Expect(err != nil && errors.IsNotFound(err)).To(BeTrue()) + }) + + return ctx, featurestore +} + +var _ = Describe("FeatureStore API", func() { + Context("When creating an invalid Online Store", func() { + ctx, featurestore := initContext() + + It("should fail when PVC persistence has absolute path", func() { + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithAbsolutePathForPvc(featurestore), "PVC path must be a file name only") + }) + It("should fail when ephemeral persistence has relative path", func() { + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithRelativePathForEphemeral(featurestore), "Ephemeral stores must have absolute paths") + }) + It("should fail when PVC persistence has object store bucket", func() { + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithObjectStoreBucketForPvc("s3://bucket/online_store.db", featurestore), "Online store does not support S3 or GS") + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithObjectStoreBucketForPvc("gs://bucket/online_store.db", featurestore), "Online store does not support S3 or GS") + }) + + It("should fail when db persistence type is invalid", func() { + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes)) + }) + }) + + Context("When creating an invalid Offline Store", func() { + ctx, featurestore := initContext() + + It("should fail when PVC persistence has absolute path", func() { + attemptInvalidCreationAndAsserts(ctx, offlineStoreWithUnmanagedFileType(featurestore), "Unsupported value") + }) + It("should fail when db persistence type is invalid", func() { + attemptInvalidCreationAndAsserts(ctx, offlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: 
"+quotedSlice(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes)) + }) + }) + + Context("When creating an invalid Registry", func() { + ctx, featurestore := initContext() + + It("should fail when PVC persistence has absolute path", func() { + attemptInvalidCreationAndAsserts(ctx, registryWithAbsolutePathForPvc(featurestore), "PVC path must be a file name only") + }) + It("should fail when ephemeral persistence has relative path", func() { + attemptInvalidCreationAndAsserts(ctx, registryWithRelativePathForEphemeral(featurestore), "Registry files must use absolute paths or be S3 ('s3://') or GS ('gs://')") + }) + It("should fail when PVC persistence has object store bucket", func() { + attemptInvalidCreationAndAsserts(ctx, registryWithObjectStoreBucketForPvc("s3://bucket/registry.db", featurestore), "PVC persistence does not support S3 or GS object store URIs") + attemptInvalidCreationAndAsserts(ctx, registryWithObjectStoreBucketForPvc("gs://bucket/registry.db", featurestore), "PVC persistence does not support S3 or GS object store URIs") + }) + It("should fail when additional S3 settings are provided to non S3 bucket", func() { + attemptInvalidCreationAndAsserts(ctx, registryWithS3AdditionalKeywordsForFile(featurestore), "Additional S3 settings are available only for S3 object store URIs") + attemptInvalidCreationAndAsserts(ctx, registryWithS3AdditionalKeywordsForGsBucket(featurestore), "Additional S3 settings are available only for S3 object store URIs") + }) + It("should fail when db persistence type is invalid", func() { + attemptInvalidCreationAndAsserts(ctx, registryStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes)) + }) + }) + + Context("When creating an invalid PvcConfig", func() { + ctx, featurestore := initContext() + + It("should fail when neither ref nor create settings are given", func() { + 
attemptInvalidCreationAndAsserts(ctx, pvcConfigWithNeitherRefNorCreate(featurestore), "One selection is required") + }) + It("should fail when both ref and create settings are given", func() { + attemptInvalidCreationAndAsserts(ctx, pvcConfigWithBothRefAndCreate(featurestore), "One selection is required") + }) + }) + + Context("When creating a valid PvcConfig", func() { + _, featurestore := initContext() + + It("should set the expected defaults", func() { + resource := pvcConfigWithNoResources(featurestore) + services.ApplyDefaultsToStatus(resource) + + storage := resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("20Gi")) + storage = resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("5Gi")) + storage = resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("5Gi")) + }) + It("should not override the configured resources", func() { + resource := pvcConfigWithResources(featurestore) + services.ApplyDefaultsToStatus(resource) + storage := resource.Status.Applied.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("10Gi")) + storage = resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("1Gi")) + storage = resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.PvcConfig.Create.Resources.Requests.Storage().String() + Expect(storage).To(Equal("500Mi")) + }) + }) + Context("When omitting the AuthzConfig PvcConfig", func() { + _, featurestore := initContext() + It("should keep an empty AuthzConfig", func() { + resource := featurestore + 
services.ApplyDefaultsToStatus(resource) + Expect(resource.Status.Applied.AuthzConfig).To(BeNil()) + }) + }) + Context("When configuring the AuthzConfig", func() { + ctx, featurestore := initContext() + It("should fail when both kubernetes and oidc settings are given", func() { + attemptInvalidCreationAndAsserts(ctx, authzConfigWithOidc(authzConfigWithKubernetes(featurestore)), "One selection required between kubernetes or oidc") + }) + }) +}) diff --git a/infra/feast-operator/test/api/suite_test.go b/infra/feast-operator/test/api/suite_test.go new file mode 100644 index 00000000000..e8c46a240c1 --- /dev/null +++ b/infra/feast-operator/test/api/suite_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestApis(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Api Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = feastdevv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/infra/feast-operator/test/data-source-types/data-source-types.py b/infra/feast-operator/test/data-source-types/data-source-types.py new file mode 100644 index 00000000000..be7d70e5ede --- /dev/null +++ b/infra/feast-operator/test/data-source-types/data-source-types.py @@ -0,0 +1,18 @@ +import os +from feast.repo_config import REGISTRY_CLASS_FOR_TYPE, OFFLINE_STORE_CLASS_FOR_TYPE, ONLINE_STORE_CLASS_FOR_TYPE, LEGACY_ONLINE_STORE_CLASS_FOR_TYPE + +def save_in_script_directory(filename: str, typedict: dict[str, str]): + script_dir = os.path.dirname(os.path.abspath(__file__)) + file_path = os.path.join(script_dir, filename) + + with open(file_path, 'w') as file: + for k in typedict.keys(): + file.write(k+"\n") + +for legacyType in LEGACY_ONLINE_STORE_CLASS_FOR_TYPE.keys(): + if legacyType in ONLINE_STORE_CLASS_FOR_TYPE: + del ONLINE_STORE_CLASS_FOR_TYPE[legacyType] + +save_in_script_directory("registry.out", REGISTRY_CLASS_FOR_TYPE) +save_in_script_directory("online-store.out", ONLINE_STORE_CLASS_FOR_TYPE) +save_in_script_directory("offline-store.out", OFFLINE_STORE_CLASS_FOR_TYPE) diff --git a/infra/feast-operator/test/data-source-types/data_source_types_test.go b/infra/feast-operator/test/data-source-types/data_source_types_test.go new file mode 100644 index 00000000000..8448b2c4212 --- /dev/null +++ b/infra/feast-operator/test/data-source-types/data_source_types_test.go @@ -0,0 +1,88 @@ +package datasources + +import ( + "bufio" + "os" + "slices" + 
"testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" +) + +func TestDataSourceTypes(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Data Source Suite") +} + +var _ = Describe("FeatureStore Data Source Types", func() { + Context("When checking against the python code in feast.repo_config", func() { + It("should match defined registry persistence types in the operator", func() { + registryFilePersistenceTypes := []string{string(services.RegistryFileConfigType)} + registryPersistenceTypes := append(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes, registryFilePersistenceTypes...) + checkPythonPersistenceTypes("registry.out", registryPersistenceTypes) + }) + It("should match defined onlineStore persistence types in the operator", func() { + onlineFilePersistenceTypes := []string{string(services.OnlineSqliteConfigType)} + onlinePersistenceTypes := append(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes, onlineFilePersistenceTypes...) + checkPythonPersistenceTypes("online-store.out", onlinePersistenceTypes) + }) + It("should match defined offlineStore persistence types in the operator", func() { + offlinePersistenceTypes := append(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes, feastdevv1alpha1.ValidOfflineStoreFilePersistenceTypes...) 
+ checkPythonPersistenceTypes("offline-store.out", offlinePersistenceTypes) + }) + }) +}) + +func checkPythonPersistenceTypes(fileName string, operatorDsTypes []string) { + feastDsTypes, err := readFileLines(fileName) + Expect(err).NotTo(HaveOccurred()) + + // Add remote type to slice, as its not a file or db type and we want to limit its use to registry service when deploying with the operator + operatorDsTypes = append(operatorDsTypes, "remote") + missingFeastTypes := []string{} + for _, ods := range operatorDsTypes { + if len(ods) > 0 { + if !slices.Contains(feastDsTypes, ods) { + missingFeastTypes = append(missingFeastTypes, ods) + } + } + } + Expect(missingFeastTypes).To(BeEmpty()) + + missingOperatorTypes := []string{} + for _, fds := range feastDsTypes { + if len(fds) > 0 { + if !slices.Contains(operatorDsTypes, fds) { + missingOperatorTypes = append(missingOperatorTypes, fds) + } + } + } + Expect(missingOperatorTypes).To(BeEmpty()) +} + +func readFileLines(filePath string) ([]string, error) { + file, err := os.Open(filePath) + Expect(err).NotTo(HaveOccurred()) + defer closeFile(file) + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + err = scanner.Err() + Expect(err).NotTo(HaveOccurred()) + + return lines, nil +} + +func closeFile(file *os.File) { + err := file.Close() + Expect(err).NotTo(HaveOccurred()) +} diff --git a/infra/feast-operator/test/e2e/e2e_suite_test.go b/infra/feast-operator/test/e2e/e2e_suite_test.go index 8e46d8a5063..c45853a0073 100644 --- a/infra/feast-operator/test/e2e/e2e_suite_test.go +++ b/infra/feast-operator/test/e2e/e2e_suite_test.go @@ -27,6 +27,6 @@ import ( // Run e2e tests using the Ginkgo runner. 
func TestE2E(t *testing.T) { RegisterFailHandler(Fail) - fmt.Fprintf(GinkgoWriter, "Starting feast-operator suite\n") + _, _ = fmt.Fprintf(GinkgoWriter, "Starting feast-operator suite\n") RunSpecs(t, "e2e suite") } diff --git a/infra/feast-operator/test/e2e/e2e_test.go b/infra/feast-operator/test/e2e/e2e_test.go index b46b3105d22..d1051900ae5 100644 --- a/infra/feast-operator/test/e2e/e2e_test.go +++ b/infra/feast-operator/test/e2e/e2e_test.go @@ -17,106 +17,38 @@ limitations under the License. package e2e import ( - "fmt" - "os/exec" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "github.com/feast-dev/feast/infra/feast-operator/test/utils" + . "github.com/onsi/ginkgo/v2" ) -const namespace = "feast-operator-system" - var _ = Describe("controller", Ordered, func() { - BeforeAll(func() { - By("installing prometheus operator") - Expect(utils.InstallPrometheusOperator()).To(Succeed()) - - By("installing the cert-manager") - Expect(utils.InstallCertManager()).To(Succeed()) + featureStoreName := "simple-feast-setup" + feastResourceName := utils.FeastPrefix + featureStoreName + feastK8sResourceNames := []string{ + feastResourceName + "-online", + feastResourceName + "-offline", + feastResourceName + "-ui", + } + + runTestDeploySimpleCRFunc := utils.GetTestDeploySimpleCRFunc("/test/e2e", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", + featureStoreName, feastResourceName, feastK8sResourceNames) + + runTestWithRemoteRegistryFunction := utils.GetTestWithRemoteRegistryFunc("/test/e2e", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", + "test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml", + featureStoreName, feastResourceName, feastK8sResourceNames) - By("creating manager namespace") - cmd := exec.Command("kubectl", "create", "ns", namespace) - _, _ = utils.Run(cmd) + BeforeAll(func() { + utils.DeployOperatorFromCode("/test/e2e", false) }) 
AfterAll(func() { - By("uninstalling the Prometheus manager bundle") - utils.UninstallPrometheusOperator() - - By("uninstalling the cert-manager bundle") - utils.UninstallCertManager() - - By("removing manager namespace") - cmd := exec.Command("kubectl", "delete", "ns", namespace) - _, _ = utils.Run(cmd) + utils.DeleteOperatorDeployment("/test/e2e") }) - Context("Operator", func() { - It("should run successfully", func() { - var controllerPodName string - var err error - - // projectimage stores the name of the image used in the example - var projectimage = "example.com/feast-operator:v0.0.1" - - By("building the manager(Operator) image") - cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("loading the the manager(Operator) image on Kind") - err = utils.LoadImageToKindClusterWithName(projectimage) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("installing CRDs") - cmd = exec.Command("make", "install") - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("deploying the controller-manager") - cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("validating that the controller-manager pod is running as expected") - verifyControllerUp := func() error { - // Get pod name - - cmd = exec.Command("kubectl", "get", - "pods", "-l", "control-plane=controller-manager", - "-o", "go-template={{ range .items }}"+ - "{{ if not .metadata.deletionTimestamp }}"+ - "{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", namespace, - ) - - podOutput, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - podNames := utils.GetNonEmptyLines(string(podOutput)) - if len(podNames) != 1 { - return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) - } - controllerPodName = podNames[0] - 
ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) - - // Validate pod status - cmd = exec.Command("kubectl", "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", namespace, - ) - status, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - if string(status) != "Running" { - return fmt.Errorf("controller pod in %s status", status) - } - return nil - } - EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) - - }) + Context("Operator E2E Tests", func() { + It("Should be able to deploy and run a default feature store CR successfully", runTestDeploySimpleCRFunc) + It("Should be able to deploy and run a feature store with remote registry CR successfully", runTestWithRemoteRegistryFunction) }) }) diff --git a/infra/feast-operator/test/previous-version/previous_version_suite_test.go b/infra/feast-operator/test/previous-version/previous_version_suite_test.go new file mode 100644 index 00000000000..cd14c89d2d6 --- /dev/null +++ b/infra/feast-operator/test/previous-version/previous_version_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package previous_version + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run upgrade tests using the Ginkgo runner. 
+func TestPreviousVersion(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting test previous version suite\n") + RunSpecs(t, "previous version operator") +} diff --git a/infra/feast-operator/test/previous-version/previous_version_test.go b/infra/feast-operator/test/previous-version/previous_version_test.go new file mode 100644 index 00000000000..9775d239bcc --- /dev/null +++ b/infra/feast-operator/test/previous-version/previous_version_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package previous_version + +import ( + "github.com/feast-dev/feast/infra/feast-operator/test/utils" + . 
"github.com/onsi/ginkgo/v2" +) + +var _ = Describe("previous version operator", Ordered, func() { + BeforeAll(func() { + utils.DeployPreviousVersionOperator() + }) + + AfterAll(func() { + utils.DeleteOperatorDeployment("/test/upgrade") + }) + + Context("Previous version operator Tests", func() { + feastK8sResourceNames := []string{ + utils.FeastResourceName + "-online", + utils.FeastResourceName + "-offline", + utils.FeastResourceName + "-ui", + } + + runTestDeploySimpleCRFunc := utils.GetTestDeploySimpleCRFunc("/test/upgrade", utils.GetSimplePreviousVerCR(), + utils.FeatureStoreName, utils.FeastResourceName, feastK8sResourceNames) + runTestWithRemoteRegistryFunction := utils.GetTestWithRemoteRegistryFunc("/test/upgrade", utils.GetSimplePreviousVerCR(), + utils.GetRemoteRegistryPreviousVerCR(), utils.FeatureStoreName, utils.FeastResourceName, feastK8sResourceNames) + + // Run Test on previous version operator + It("Should be able to deploy and run a default feature store CR successfully", runTestDeploySimpleCRFunc) + It("Should be able to deploy and run a feature store with remote registry CR successfully", runTestWithRemoteRegistryFunction) + }) +}) diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml new file mode 100644 index 00000000000..9edf1dd9664 --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml @@ -0,0 +1,13 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: simple-feast-setup +spec: + feastProject: my_project + services: + offlineStore: + server: {} + registry: + local: + server: {} + ui: {} diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml 
new file mode 100644 index 00000000000..9746e3819a5 --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml @@ -0,0 +1,15 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: simple-feast-remote-setup +spec: + feastProject: my_project + services: + offlineStore: + server: {} + ui: {} + registry: + remote: + feastRef: + name: simple-feast-setup + namespace: default \ No newline at end of file diff --git a/infra/feast-operator/test/upgrade/upgrade_suite_test.go b/infra/feast-operator/test/upgrade/upgrade_suite_test.go new file mode 100644 index 00000000000..bd0da7ab177 --- /dev/null +++ b/infra/feast-operator/test/upgrade/upgrade_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package previous_version + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run upgrade tests using the Ginkgo runner. +func TestUpgrade(t *testing.T) { + RegisterFailHandler(Fail) + _, _ = fmt.Fprintf(GinkgoWriter, "Starting upgrade operator suite\n") + RunSpecs(t, "operator upgrade") +} diff --git a/infra/feast-operator/test/upgrade/upgrade_test.go b/infra/feast-operator/test/upgrade/upgrade_test.go new file mode 100644 index 00000000000..313fa41213c --- /dev/null +++ b/infra/feast-operator/test/upgrade/upgrade_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 Feast Community. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package previous_version + +import ( + "github.com/feast-dev/feast/infra/feast-operator/test/utils" + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("operator upgrade", Ordered, func() { + BeforeAll(func() { + utils.DeployPreviousVersionOperator() + utils.DeployOperatorFromCode("/test/e2e", true) + }) + + AfterAll(func() { + utils.DeleteOperatorDeployment("/test/e2e") + }) + + Context("Operator upgrade Tests", func() { + runTestDeploySimpleCRFunc := utils.GetTestDeploySimpleCRFunc("/test/upgrade", utils.GetSimplePreviousVerCR(), + utils.FeatureStoreName, utils.FeastResourceName, []string{}) + runTestWithRemoteRegistryFunction := utils.GetTestWithRemoteRegistryFunc("/test/upgrade", utils.GetSimplePreviousVerCR(), + utils.GetRemoteRegistryPreviousVerCR(), utils.FeatureStoreName, utils.FeastResourceName, []string{}) + + // Run Test on current version operator with previous version CR + It("Should be able to deploy and run a default feature store CR successfully", runTestDeploySimpleCRFunc) + It("Should be able to deploy and run a feature store with remote registry CR successfully", runTestWithRemoteRegistryFunction) + }) +}) diff --git a/infra/feast-operator/test/utils/test_util.go b/infra/feast-operator/test/utils/test_util.go new file mode 100644 index 00000000000..b34c4272c46 --- /dev/null +++ b/infra/feast-operator/test/utils/test_util.go @@ -0,0 +1,448 @@ +package utils + +import ( + "bytes" + "encoding/json" + "errors" + 
"fmt" + "os" + "os/exec" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + + "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" + "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" +) + +const ( + FeastControllerNamespace = "feast-operator-system" + Timeout = 3 * time.Minute + ControllerDeploymentName = "feast-operator-controller-manager" + FeastPrefix = "feast-" + FeatureStoreName = "simple-feast-setup" + FeastResourceName = FeastPrefix + FeatureStoreName +) + +// dynamically checks if all conditions of custom resource featurestore are in "Ready" state. +func checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namespace string) error { + // Wait 10 seconds to lets the feature store status update + time.Sleep(1 * time.Minute) + + cmd := exec.Command("kubectl", "get", "featurestore", featureStoreName, "-n", namespace, "-o", "json") + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to get resource %s in namespace %s. Error: %v. Stderr: %s", + featureStoreName, namespace, err, stderr.String()) + } + + // Parse the JSON into FeatureStore + var resource v1alpha1.FeatureStore + if err := json.Unmarshal(out.Bytes(), &resource); err != nil { + return fmt.Errorf("failed to parse the resource JSON. Error: %v", err) + } + + // Validate all conditions + for _, condition := range resource.Status.Conditions { + if condition.Status != "True" { + return fmt.Errorf(" FeatureStore=%s condition '%s' is not in 'Ready' state. Status: %s", + featureStoreName, condition.Type, condition.Status) + } + } + + return nil +} + +// CheckIfDeploymentExistsAndAvailable - validates if a deployment exists and also in the availability state as True. 
+func CheckIfDeploymentExistsAndAvailable(namespace string, deploymentName string, timeout time.Duration) error { + var output, errOutput bytes.Buffer + + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + timeoutChan := time.After(timeout) + + for { + select { + case <-timeoutChan: + return fmt.Errorf("timed out waiting for deployment %s to become available", deploymentName) + case <-ticker.C: + // Run kubectl command + cmd := exec.Command("kubectl", "get", "deployment", deploymentName, "-n", namespace, "-o", "json") + cmd.Stdout = &output + cmd.Stderr = &errOutput + + if err := cmd.Run(); err != nil { + // Log error and retry + fmt.Printf("Deployment not yet found, we may try again to find the updated status: %s\n", errOutput.String()) + continue + } + + // Parse the JSON output into Deployment + var result appsv1.Deployment + if err := json.Unmarshal(output.Bytes(), &result); err != nil { + return fmt.Errorf("failed to parse deployment JSON: %v", err) + } + + // Check for Available condition + for _, condition := range result.Status.Conditions { + if condition.Type == "Available" && condition.Status == "True" { + return nil // Deployment is available + } + } + + // Reset buffers for the next loop iteration + output.Reset() + errOutput.Reset() + } + } +} + +// validates if a service account exists using the kubectl CLI. +func checkIfServiceAccountExists(namespace, saName string) error { + cmd := exec.Command("kubectl", "get", "sa", saName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find service account %s in namespace %s. Error: %v. 
Stderr: %s", + saName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), saName) { + return fmt.Errorf("service account %s not found in namespace %s", saName, namespace) + } + + return nil +} + +// validates if a config map exists using the kubectl CLI. +func checkIfConfigMapExists(namespace, configMapName string) error { + cmd := exec.Command("kubectl", "get", "cm", configMapName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find config map %s in namespace %s. Error: %v. Stderr: %s", + configMapName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), configMapName) { + return fmt.Errorf("config map %s not found in namespace %s", configMapName, namespace) + } + + return nil +} + +// validates if a kubernetes service exists using the kubectl CLI. +func checkIfKubernetesServiceExists(namespace, serviceName string) error { + cmd := exec.Command("kubectl", "get", "service", serviceName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find kubernetes service %s in namespace %s. Error: %v. 
Stderr: %s", + serviceName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), serviceName) { + return fmt.Errorf("kubernetes service %s not found in namespace %s", serviceName, namespace) + } + + return nil +} + +func isFeatureStoreHavingRemoteRegistry(namespace, featureStoreName string) (bool, error) { + timeout := time.Second * 30 + interval := time.Second * 2 // Poll every 2 seconds + startTime := time.Now() + + for time.Since(startTime) < timeout { + cmd := exec.Command("kubectl", "get", "featurestore", featureStoreName, "-n", namespace, + "-o=jsonpath='{.status.applied.services.registry}'") + + output, err := cmd.Output() + if err != nil { + // Retry only on transient errors + if _, ok := err.(*exec.ExitError); ok { + time.Sleep(interval) + continue + } + return false, err // Return immediately on non-transient errors + } + + // Convert output to string and trim any extra spaces + result := strings.TrimSpace(string(output)) + + // Remove single quotes if present + if strings.HasPrefix(result, "'") && strings.HasSuffix(result, "'") { + result = strings.Trim(result, "'") + } + + if result == "" { + time.Sleep(interval) // Retry if result is empty + continue + } + + // Parse the JSON into a map + var registryConfig v1alpha1.Registry + if err := json.Unmarshal([]byte(result), ®istryConfig); err != nil { + return false, err // Return false on JSON parsing failure + } + + if registryConfig.Remote == nil { + return false, nil + } + + hasHostname := registryConfig.Remote.Hostname != nil + hasValidFeastRef := registryConfig.Remote.FeastRef != nil && + registryConfig.Remote.FeastRef.Name != "" + + return hasHostname || hasValidFeastRef, nil + } + + return false, errors.New("timeout waiting for featurestore registry status to be ready") +} + +func validateTheFeatureStoreCustomResource(namespace string, featureStoreName string, feastResourceName string, feastK8sResourceNames []string, timeout 
time.Duration) { + hasRemoteRegistry, err := isFeatureStoreHavingRemoteRegistry(namespace, featureStoreName) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "Error occurred while checking FeatureStore %s is having remote registry or not. \nError: %v\n", + featureStoreName, err)) + + k8sResourceNames := []string{feastResourceName} + + if !hasRemoteRegistry { + feastK8sResourceNames = append(feastK8sResourceNames, feastResourceName+"-registry") + } + + for _, deploymentName := range k8sResourceNames { + By(fmt.Sprintf("validate the feast deployment: %s is up and in availability state.", deploymentName)) + err = CheckIfDeploymentExistsAndAvailable(namespace, deploymentName, timeout) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. \nError: %v\n", + deploymentName, err, + )) + fmt.Printf("Feast Deployment %s is available\n", deploymentName) + } + + By("Check if the feast client - kubernetes config map exists.") + configMapName := feastResourceName + "-client" + err = checkIfConfigMapExists(namespace, configMapName) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "config map %s is not available but expected to be available. \nError: %v\n", + configMapName, err, + )) + fmt.Printf("Feast Deployment client config map %s is available\n", configMapName) + + for _, serviceAccountName := range k8sResourceNames { + By(fmt.Sprintf("validate the feast service account: %s is available.", serviceAccountName)) + err = checkIfServiceAccountExists(namespace, serviceAccountName) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "Service account %s does not exist in namespace %s. 
Error: %v", + serviceAccountName, namespace, err, + )) + fmt.Printf("Service account %s exists in namespace %s\n", serviceAccountName, namespace) + } + + for _, serviceName := range feastK8sResourceNames { + By(fmt.Sprintf("validate the kubernetes service name: %s is available.", serviceName)) + err = checkIfKubernetesServiceExists(namespace, serviceName) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "kubernetes service %s is not available but expected to be available. \nError: %v\n", + serviceName, err, + )) + fmt.Printf("kubernetes service %s is available\n", serviceName) + } + + By(fmt.Sprintf("Checking FeatureStore customer resource: %s is in Ready Status.", featureStoreName)) + err = checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namespace) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "FeatureStore custom resource %s all conditions are not in ready state. \nError: %v\n", + featureStoreName, err, + )) + fmt.Printf("FeatureStore custom resource %s conditions are in Ready State\n", featureStoreName) +} + +// GetTestDeploySimpleCRFunc - returns a simple CR deployment function +func GetTestDeploySimpleCRFunc(testDir string, crYaml string, featureStoreName string, feastResourceName string, feastK8sResourceNames []string) func() { + return func() { + By("deploying the Simple Feast Custom Resource to Kubernetes") + namespace := "default" + + cmd := exec.Command("kubectl", "apply", "-f", crYaml, "-n", namespace) + _, cmdOutputerr := Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + + validateTheFeatureStoreCustomResource(namespace, featureStoreName, feastResourceName, feastK8sResourceNames, Timeout) + + By("deleting the feast deployment") + cmd = exec.Command("kubectl", "delete", "-f", crYaml) + _, cmdOutputerr = Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + } +} + +// GetTestWithRemoteRegistryFunc - returns a CR deployment with a remote registry function +func 
GetTestWithRemoteRegistryFunc(testDir string, crYaml string, remoteRegistryCRYaml string, featureStoreName string, feastResourceName string, feastK8sResourceNames []string) func() { + return func() { + By("deploying the Simple Feast Custom Resource to Kubernetes") + namespace := "default" + cmd := exec.Command("kubectl", "apply", "-f", crYaml, "-n", namespace) + _, cmdOutputErr := Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputErr).NotTo(HaveOccurred()) + + validateTheFeatureStoreCustomResource(namespace, featureStoreName, feastResourceName, feastK8sResourceNames, Timeout) + + var remoteRegistryNs = "remote-registry" + By(fmt.Sprintf("Creating the remote registry namespace=%s", remoteRegistryNs)) + cmd = exec.Command("kubectl", "create", "ns", remoteRegistryNs) + _, _ = Run(cmd, testDir) + + By("deploying the Simple Feast remote registry Custom Resource on Kubernetes") + cmd = exec.Command("kubectl", "apply", "-f", remoteRegistryCRYaml, "-n", remoteRegistryNs) + _, cmdOutputErr = Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputErr).NotTo(HaveOccurred()) + + remoteFeatureStoreName := "simple-feast-remote-setup" + remoteFeastResourceName := FeastPrefix + remoteFeatureStoreName + fixRemoteFeastK8sResourceNames(feastK8sResourceNames, remoteFeastResourceName) + validateTheFeatureStoreCustomResource(remoteRegistryNs, remoteFeatureStoreName, remoteFeastResourceName, feastK8sResourceNames, Timeout) + + By("deleting the feast remote registry deployment") + cmd = exec.Command("kubectl", "delete", "-f", remoteRegistryCRYaml, "-n", remoteRegistryNs) + _, cmdOutputErr = Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputErr).NotTo(HaveOccurred()) + + By("deleting the feast deployment") + cmd = exec.Command("kubectl", "delete", "-f", crYaml, "-n", namespace) + _, cmdOutputErr = Run(cmd, testDir) + ExpectWithOffset(1, cmdOutputErr).NotTo(HaveOccurred()) + } +} + +func fixRemoteFeastK8sResourceNames(feastK8sResourceNames []string, remoteFeastResourceName string) { + for i, 
feastK8sResourceName := range feastK8sResourceNames { + if index := strings.LastIndex(feastK8sResourceName, "-"); index != -1 { + feastK8sResourceNames[i] = remoteFeastResourceName + feastK8sResourceName[index:] + } + } +} + +// DeployOperatorFromCode - Creates the images for the operator and deploys it +func DeployOperatorFromCode(testDir string, skipBuilds bool) { + _, isRunOnOpenShiftCI := os.LookupEnv("RUN_ON_OPENSHIFT_CI") + if !isRunOnOpenShiftCI { + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", FeastControllerNamespace) + _, _ = Run(cmd, testDir) + + var err error + // projectimage stores the name of the image used in the example + var projectimage = "localhost/feast-operator:v0.0.1" + + // this image will be built in above make target. + var feastImage = "feastdev/feature-server:dev" + var feastLocalImage = "localhost/feastdev/feature-server:dev" + + if !skipBuilds { + By("building the manager(Operator) image") + cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = LoadImageToKindClusterWithName(projectimage, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("building the feast image") + cmd = exec.Command("make", "feast-ci-dev-docker-img") + _, err = Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Tag the local feast image for the integration tests") + cmd = exec.Command("docker", "image", "tag", feastImage, feastLocalImage) + _, err = Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the feast image on Kind cluster") + err = LoadImageToKindClusterWithName(feastLocalImage, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) 
+ + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage), fmt.Sprintf("FS_IMG=%s", feastLocalImage)) + _, err = Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("Validating that the controller-manager deployment is in available state") + err := CheckIfDeploymentExistsAndAvailable(FeastControllerNamespace, ControllerDeploymentName, Timeout) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. \nError: %v\n", + ControllerDeploymentName, err, + )) + fmt.Printf("Feast Control Manager Deployment %s is available\n", ControllerDeploymentName) +} + +// DeleteOperatorDeployment - Deletes the operator deployment +func DeleteOperatorDeployment(testDir string) { + _, isRunOnOpenShiftCI := os.LookupEnv("RUN_ON_OPENSHIFT_CI") + if !isRunOnOpenShiftCI { + By("Uninstalling the feast CRD") + cmd := exec.Command("kubectl", "delete", "deployment", ControllerDeploymentName, "-n", FeastControllerNamespace) + _, err := Run(cmd, testDir) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } +} + +// DeployPreviousVersionOperator - Deploys the previous version of the operator +func DeployPreviousVersionOperator() { + var err error + + cmd := exec.Command("kubectl", "apply", "-f", fmt.Sprintf("https://raw.githubusercontent.com/feast-dev/feast/refs/tags/v%s/infra/feast-operator/dist/install.yaml", feastversion.FeastVersion)) + _, err = Run(cmd, "/test/upgrade") + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = CheckIfDeploymentExistsAndAvailable(FeastControllerNamespace, ControllerDeploymentName, Timeout) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. 
\nError: %v\n", + ControllerDeploymentName, err, + )) + fmt.Printf("Feast Control Manager Deployment %s is available\n", ControllerDeploymentName) +} + +// GetSimplePreviousVerCR - Get The previous version simple CR for tests +func GetSimplePreviousVerCR() string { + return fmt.Sprintf("https://raw.githubusercontent.com/feast-dev/feast/refs/tags/v%s/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", feastversion.FeastVersion) +} + +// GetRemoteRegistryPreviousVerCR - Get The previous version remote registry CR for tests +func GetRemoteRegistryPreviousVerCR() string { + return fmt.Sprintf("https://raw.githubusercontent.com/feast-dev/feast/refs/tags/v%s/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml", feastversion.FeastVersion) +} diff --git a/infra/feast-operator/test/utils/utils.go b/infra/feast-operator/test/utils/utils.go index cfd9e595823..7529a3a0f50 100644 --- a/infra/feast-operator/test/utils/utils.go +++ b/infra/feast-operator/test/utils/utils.go @@ -35,29 +35,29 @@ const ( ) func warnError(err error) { - fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) + _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) } // InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. 
-func InstallPrometheusOperator() error { +func InstallPrometheusOperator(testDir string) error { url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) cmd := exec.Command("kubectl", "create", "-f", url) - _, err := Run(cmd) + _, err := Run(cmd, testDir) return err } // Run executes the provided command within this context -func Run(cmd *exec.Cmd) ([]byte, error) { - dir, _ := GetProjectDir() +func Run(cmd *exec.Cmd, testDir string) ([]byte, error) { + dir, _ := GetProjectDir(testDir) cmd.Dir = dir if err := os.Chdir(cmd.Dir); err != nil { - fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) } cmd.Env = append(os.Environ(), "GO111MODULE=on") command := strings.Join(cmd.Args, " ") - fmt.Fprintf(GinkgoWriter, "running: %s\n", command) + _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) output, err := cmd.CombinedOutput() if err != nil { return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) @@ -67,28 +67,28 @@ func Run(cmd *exec.Cmd) ([]byte, error) { } // UninstallPrometheusOperator uninstalls the prometheus -func UninstallPrometheusOperator() { +func UninstallPrometheusOperator(testDir string) { url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) cmd := exec.Command("kubectl", "delete", "-f", url) - if _, err := Run(cmd); err != nil { + if _, err := Run(cmd, testDir); err != nil { warnError(err) } } // UninstallCertManager uninstalls the cert manager -func UninstallCertManager() { +func UninstallCertManager(testDir string) { url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) cmd := exec.Command("kubectl", "delete", "-f", url) - if _, err := Run(cmd); err != nil { + if _, err := Run(cmd, testDir); err != nil { warnError(err) } } // InstallCertManager installs the cert manager bundle. 
-func InstallCertManager() error { +func InstallCertManager(testDir string) error { url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) cmd := exec.Command("kubectl", "apply", "-f", url) - if _, err := Run(cmd); err != nil { + if _, err := Run(cmd, testDir); err != nil { return err } // Wait for cert-manager-webhook to be ready, which can take time if cert-manager @@ -99,19 +99,20 @@ func InstallCertManager() error { "--timeout", "5m", ) - _, err := Run(cmd) + _, err := Run(cmd, testDir) return err } // LoadImageToKindCluster loads a local docker image to the kind cluster -func LoadImageToKindClusterWithName(name string) error { +func LoadImageToKindClusterWithName(name string, testDir string) error { cluster := "kind" if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { cluster = v } + fmt.Println("cluster used in the test is -", cluster) kindOptions := []string{"load", "docker-image", name, "--name", cluster} cmd := exec.Command("kind", kindOptions...) - _, err := Run(cmd) + _, err := Run(cmd, testDir) return err } @@ -130,11 +131,11 @@ func GetNonEmptyLines(output string) []string { } // GetProjectDir will return the directory where the project is -func GetProjectDir() (string, error) { +func GetProjectDir(projectDir string) (string, error) { wd, err := os.Getwd() if err != nil { return wd, err } - wd = strings.Replace(wd, "/test/e2e", "", -1) + wd = strings.Replace(wd, projectDir, "", -1) return wd, nil } diff --git a/infra/scripts/pixi/pixi.lock b/infra/scripts/pixi/pixi.lock index 1ca8742026c..5f957f508c9 100644 --- a/infra/scripts/pixi/pixi.lock +++ b/infra/scripts/pixi/pixi.lock @@ -1,4 +1,4 @@ -version: 5 +version: 6 environments: default: channels: @@ -7,16 +7,16 @@ environments: linux-64: - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h95c4c6d_6.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.1.39-h0ea3d13_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.2.0-h8f9b012_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.6.3-h0f3a69f_0.conda osx-64: - - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-17.0.6-heb59cac_3.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.1.45-h4e38c46_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-19.1.7-hf95d169_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.6.3-h8de1528_0.conda osx-arm64: - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-17.0.6-h5f092b4_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.1.45-hc069d6b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-19.1.7-ha82da77_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.6.3-h668ec48_0.conda py310: channels: - url: https://conda.anaconda.org/conda-forge/ @@ -28,11 +28,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda + 
- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.2.0-h69a702a_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.3-h2797004_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h95c4c6d_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.2.0-h8f9b012_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda @@ -42,12 +43,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.1.39-h0ea3d13_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.6.3-h0f3a69f_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 osx-64: - conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.7.4-h8857fd0_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-17.0.6-heb59cac_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-19.1.7-hf95d169_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - conda: 
https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.46.0-h1b8f9f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.3.1-h87427d6_1.conda @@ -57,12 +58,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.1.45-h4e38c46_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.6.3-h8de1528_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 osx-arm64: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-17.0.6-h5f092b4_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-19.1.7-ha82da77_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.3-h091b4b1_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-hfb2fe0b_6.conda @@ -72,7 +73,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.1.45-hc069d6b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.6.3-h668ec48_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 py311: channels: @@ -86,11 +87,12 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h55db66e_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.2-h59595ed_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-hc881cc4_6.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-hc881cc4_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.2.0-h69a702a_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.3-h2797004_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h95c4c6d_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.2.0-h8f9b012_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda @@ -100,12 +102,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.1.39-h0ea3d13_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.6.3-h0f3a69f_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 osx-64: - conda: 
https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.7.4-h8857fd0_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-17.0.6-heb59cac_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-19.1.7-hf95d169_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libexpat-2.6.2-h73e2aa4_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.46.0-h1b8f9f3_0.conda @@ -116,12 +118,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.1.45-h4e38c46_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.6.3-h8de1528_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 osx-arm64: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-17.0.6-h5f092b4_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-19.1.7-ha82da77_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.2-hebf3989_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.3-h091b4b1_0.conda @@ -132,7 +134,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.1.45-hc069d6b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.6.3-h668ec48_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 py39: channels: @@ -145,11 +147,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.2.0-h69a702a_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.3-h2797004_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h95c4c6d_6.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.2.0-h8f9b012_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda @@ -159,12 +162,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.1.39-h0ea3d13_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.6.3-h0f3a69f_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 osx-64: - conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.7.4-h8857fd0_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-17.0.6-heb59cac_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-19.1.7-hf95d169_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.46.0-h1b8f9f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.3.1-h87427d6_1.conda @@ -174,12 +177,12 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.1.45-h4e38c46_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.6.3-h8de1528_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 osx-arm64: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-17.0.6-h5f092b4_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-19.1.7-ha82da77_0.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.3-h091b4b1_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-hfb2fe0b_6.conda @@ -189,27 +192,17 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.1.45-hc069d6b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.6.3-h668ec48_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 packages: -- kind: conda - name: _libgcc_mutex - version: '0.1' - build: conda_forge - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 +- conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 sha256: fe51de6107f9edc7aa4f786a70f4a883943bc9d39b3bb7307c04c41410990726 md5: d7c89558ba9fa0495403155b64376d81 license: None size: 2562 timestamp: 1578324546067 -- kind: conda - name: _openmp_mutex - version: '4.5' - build: 2_gnu +- conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 build_number: 16 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 sha256: fbe2c5e56a653bebb982eda4876a9178aedfc2b545f25d0ce9c4c0b508253d22 md5: 73aaf86a425cc6e73fcf236a5a46396d depends: @@ -221,86 +214,48 @@ packages: license_family: BSD size: 23621 timestamp: 1650670423406 -- kind: conda - name: bzip2 - version: 1.0.8 - build: h10d778d_5 - build_number: 5 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda +- conda: 
https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hd590300_5.conda + sha256: 242c0c324507ee172c0e0dd2045814e746bb303d1eb78870d182ceb0abc726a8 + md5: 69b8b6202a07720f448be700e300ccf4 + depends: + - libgcc-ng >=12 + license: bzip2-1.0.6 + license_family: BSD + size: 254228 + timestamp: 1699279927352 +- conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda sha256: 61fb2b488928a54d9472113e1280b468a309561caa54f33825a3593da390b242 md5: 6097a6ca9ada32699b5fc4312dd6ef18 license: bzip2-1.0.6 license_family: BSD size: 127885 timestamp: 1699280178474 -- kind: conda - name: bzip2 - version: 1.0.8 - build: h93a5062_5 - build_number: 5 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda sha256: bfa84296a638bea78a8bb29abc493ee95f2a0218775642474a840411b950fe5f md5: 1bbc659ca658bfd49a481b5ef7a0f40f license: bzip2-1.0.6 license_family: BSD size: 122325 timestamp: 1699280294368 -- kind: conda - name: bzip2 - version: 1.0.8 - build: hd590300_5 - build_number: 5 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hd590300_5.conda - sha256: 242c0c324507ee172c0e0dd2045814e746bb303d1eb78870d182ceb0abc726a8 - md5: 69b8b6202a07720f448be700e300ccf4 - depends: - - libgcc-ng >=12 - license: bzip2-1.0.6 - license_family: BSD - size: 254228 - timestamp: 1699279927352 -- kind: conda - name: ca-certificates - version: 2024.2.2 - build: hbcca054_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda sha256: 91d81bfecdbb142c15066df70cc952590ae8991670198f92c66b62019b251aeb md5: 2f4327a1cbe7f022401b236e915a5fef license: ISC size: 155432 timestamp: 1706843687645 -- kind: conda - name: ca-certificates - version: 2024.2.2 - build: 
hf0a4a13_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda - sha256: 49bc3439816ac72d0c0e0f144b8cc870fdcc4adec2e861407ec818d8116b2204 - md5: fb416a1795f18dcc5a038bc2dc54edf9 - license: ISC - size: 155725 - timestamp: 1706844034242 -- kind: conda - name: ca-certificates - version: 2024.7.4 - build: h8857fd0_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.7.4-h8857fd0_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.7.4-h8857fd0_0.conda sha256: d16f46c489cb3192305c7d25b795333c5fc17bb0986de20598ed519f8c9cc9e4 md5: 7df874a4b05b2d2b82826190170eaa0f license: ISC size: 154473 timestamp: 1720077510541 -- kind: conda - name: ld_impl_linux-64 - version: '2.40' - build: h41732ed_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda + sha256: 49bc3439816ac72d0c0e0f144b8cc870fdcc4adec2e861407ec818d8116b2204 + md5: fb416a1795f18dcc5a038bc2dc54edf9 + license: ISC + size: 155725 + timestamp: 1706844034242 +- conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda sha256: f6cc89d887555912d6c61b295d398cff9ec982a3417d38025c45d5dd9b9e79cd md5: 7aca3059a1729aa76c597603f10b0dd3 constrains: @@ -309,12 +264,7 @@ packages: license_family: GPL size: 704696 timestamp: 1674833944779 -- kind: conda - name: ld_impl_linux-64 - version: '2.40' - build: h55db66e_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h55db66e_0.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h55db66e_0.conda sha256: ef969eee228cfb71e55146eaecc6af065f468cb0bc0a5239bc053b39db0b5f09 md5: 10569984e7db886e4f1abc2b47ad79a1 constrains: @@ -323,41 +273,29 @@ packages: license_family: GPL size: 
713322 timestamp: 1713651222435 -- kind: conda - name: libcxx - version: 17.0.6 - build: h5f092b4_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-17.0.6-h5f092b4_0.conda - sha256: 119d3d9306f537d4c89dc99ed99b94c396d262f0b06f7833243646f68884f2c2 - md5: a96fd5dda8ce56c86a971e0fa02751d0 +- conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-19.1.7-hf95d169_0.conda + sha256: 6b2fa3fb1e8cd2000b0ed259e0c4e49cbef7b76890157fac3e494bc659a20330 + md5: 4b8f8dc448d814169dbc58fc7286057d depends: - - __osx >=11.0 + - __osx >=10.13 + arch: x86_64 + platform: osx license: Apache-2.0 WITH LLVM-exception license_family: Apache - size: 1248885 - timestamp: 1715020154867 -- kind: conda - name: libcxx - version: 17.0.6 - build: heb59cac_3 - build_number: 3 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libcxx-17.0.6-heb59cac_3.conda - sha256: 9df841c64b19a3843869467ff8ff2eb3f6c5491ebaac8fd94fb8029a5b00dcbf - md5: ef15f182e353155497e13726b915bfc4 + size: 527924 + timestamp: 1736877256721 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-19.1.7-ha82da77_0.conda + sha256: 776092346da87a2a23502e14d91eb0c32699c4a1522b7331537bd1c3751dcff5 + md5: 5b3e1610ff8bd5443476b91d618f5b77 depends: - - __osx >=10.13 + - __osx >=11.0 + arch: arm64 + platform: osx license: Apache-2.0 WITH LLVM-exception license_family: Apache - size: 1250659 - timestamp: 1720040263499 -- kind: conda - name: libexpat - version: 2.6.2 - build: h59595ed_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.2-h59595ed_0.conda + size: 523505 + timestamp: 1736877862502 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.2-h59595ed_0.conda sha256: 331bb7c7c05025343ebd79f86ae612b9e1e74d2687b8f3179faec234f986ce19 md5: e7ba12deb7020dd080c6c70e7b6f6a3d depends: @@ -368,12 +306,7 @@ packages: license_family: MIT size: 73730 timestamp: 1710362120304 -- kind: conda - name: libexpat - version: 2.6.2 - 
build: h73e2aa4_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libexpat-2.6.2-h73e2aa4_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-64/libexpat-2.6.2-h73e2aa4_0.conda sha256: a188a77b275d61159a32ab547f7d17892226e7dac4518d2c6ac3ac8fc8dfde92 md5: 3d1d51c8f716d97c864d12f7af329526 constrains: @@ -382,12 +315,7 @@ packages: license_family: MIT size: 69246 timestamp: 1710362566073 -- kind: conda - name: libexpat - version: 2.6.2 - build: hebf3989_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.2-hebf3989_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.2-hebf3989_0.conda sha256: ba7173ac30064ea901a4c9fb5a51846dcc25512ceb565759be7d18cbf3e5415e md5: e3cde7cfa87f82f7cb13d482d5e0ad09 constrains: @@ -396,119 +324,67 @@ packages: license_family: MIT size: 63655 timestamp: 1710362424980 -- kind: conda - name: libffi - version: 3.4.2 - build: h0d85af4_5 - build_number: 5 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 + sha256: ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e + md5: d645c6d2ac96843a2bfaccd2d62b3ac3 + depends: + - libgcc-ng >=9.4.0 + license: MIT + license_family: MIT + size: 58292 + timestamp: 1636488182923 +- conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2 sha256: 7a2d27a936ceee6942ea4d397f9c7d136f12549d86f7617e8b6bad51e01a941f md5: ccb34fb14960ad8b125962d3d79b31a9 license: MIT license_family: MIT size: 51348 timestamp: 1636488394370 -- kind: conda - name: libffi - version: 3.4.2 - build: h3422bc3_5 - build_number: 5 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 sha256: 
41b3d13efb775e340e4dba549ab5c029611ea6918703096b2eaa9c015c0750ca md5: 086914b672be056eb70fd4285b6783b6 license: MIT license_family: MIT size: 39020 timestamp: 1636488587153 -- kind: conda - name: libffi - version: 3.4.2 - build: h7f98852_5 - build_number: 5 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 - sha256: ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e - md5: d645c6d2ac96843a2bfaccd2d62b3ac3 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda + sha256: 3a572d031cb86deb541d15c1875aaa097baefc0c580b54dc61f5edab99215792 + md5: ef504d1acbd74b7cc6849ef8af47dd03 depends: - - libgcc-ng >=9.4.0 - license: MIT - license_family: MIT - size: 58292 - timestamp: 1636488182923 -- kind: conda - name: libgcc-ng - version: 13.2.0 - build: h807b86a_5 - build_number: 5 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda - sha256: d32f78bfaac282cfe5205f46d558704ad737b8dbf71f9227788a5ca80facaba4 - md5: d4ff227c46917d3b4565302a2bbb276b - depends: - - _libgcc_mutex 0.1 conda_forge + - __glibc >=2.17,<3.0.a0 - _openmp_mutex >=4.5 constrains: - - libgomp 13.2.0 h807b86a_5 + - libgomp 14.2.0 h767d61c_2 + - libgcc-ng ==14.2.0=*_2 + arch: x86_64 + platform: linux license: GPL-3.0-only WITH GCC-exception-3.1 license_family: GPL - size: 770506 - timestamp: 1706819192021 -- kind: conda - name: libgcc-ng - version: 13.2.0 - build: hc881cc4_6 - build_number: 6 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-hc881cc4_6.conda - sha256: 836a0057525f1414de43642d357d0ab21ac7f85e24800b010dbc17d132e6efec - md5: df88796bd09a0d2ed292e59101478ad8 - depends: - - _libgcc_mutex 0.1 conda_forge - - _openmp_mutex >=4.5 - constrains: - - libgomp 13.2.0 hc881cc4_6 + size: 847885 + timestamp: 1740240653082 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.2.0-h69a702a_2.conda + 
sha256: fb7558c328b38b2f9d2e412c48da7890e7721ba018d733ebdfea57280df01904 + md5: a2222a6ada71fb478682efe483ce0f92 + depends: + - libgcc 14.2.0 h767d61c_2 + arch: x86_64 + platform: linux license: GPL-3.0-only WITH GCC-exception-3.1 license_family: GPL - size: 777315 - timestamp: 1713755001744 -- kind: conda - name: libgomp - version: 13.2.0 - build: h807b86a_5 - build_number: 5 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda - sha256: 0d3d4b1b0134283ea02d58e8eb5accf3655464cf7159abf098cc694002f8d34e - md5: d211c42b9ce49aee3734fdc828731689 - depends: - - _libgcc_mutex 0.1 conda_forge - license: GPL-3.0-only WITH GCC-exception-3.1 - license_family: GPL - size: 419751 - timestamp: 1706819107383 -- kind: conda - name: libgomp - version: 13.2.0 - build: hc881cc4_6 - build_number: 6 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-hc881cc4_6.conda - sha256: e722b19b23b31a14b1592d5eceabb38dc52452ff5e4d346e330526971c22e52a - md5: aae89d3736661c36a5591788aebd0817 - depends: - - _libgcc_mutex 0.1 conda_forge + size: 53758 + timestamp: 1740240660904 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda + sha256: 1a3130e0b9267e781b89399580f3163632d59fe5b0142900d63052ab1a53490e + md5: 06d02030237f4d5b3d9a7e7d348fe3c6 + depends: + - __glibc >=2.17,<3.0.a0 + arch: x86_64 + platform: linux license: GPL-3.0-only WITH GCC-exception-3.1 license_family: GPL - size: 422363 - timestamp: 1713754915251 -- kind: conda - name: libnsl - version: 2.0.1 - build: hd590300_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda + size: 459862 + timestamp: 1740240588123 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda sha256: 26d77a3bb4dceeedc2a41bd688564fe71bf2d149fdcf117049970bc02ff1add6 md5: 30fd6e37fe21f86f4bd26d6ee73eeec7 depends: @@ -517,25 +393,7 @@ packages: 
license_family: GPL size: 33408 timestamp: 1697359010159 -- kind: conda - name: libsqlite - version: 3.45.3 - build: h091b4b1_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.3-h091b4b1_0.conda - sha256: 4337f466eb55bbdc74e168b52ec8c38f598e3664244ec7a2536009036e2066cc - md5: c8c1186c7f3351f6ffddb97b1f54fc58 - depends: - - libzlib >=1.2.13,<2.0.0a0 - license: Unlicense - size: 824794 - timestamp: 1713367748819 -- kind: conda - name: libsqlite - version: 3.45.3 - build: h2797004_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.3-h2797004_0.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.3-h2797004_0.conda sha256: e2273d6860eadcf714a759ffb6dc24a69cfd01f2a0ea9d6c20f86049b9334e0c md5: b3316cbe90249da4f8e84cd66e1cc55b depends: @@ -544,12 +402,7 @@ packages: license: Unlicense size: 859858 timestamp: 1713367435849 -- kind: conda - name: libsqlite - version: 3.46.0 - build: h1b8f9f3_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.46.0-h1b8f9f3_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.46.0-h1b8f9f3_0.conda sha256: 63af1a9e3284c7e4952364bafe7267e41e2d9d8bcc0e85a4ea4b0ec02d3693f6 md5: 5dadfbc1a567fe6e475df4ce3148be09 depends: @@ -558,25 +411,27 @@ packages: license: Unlicense size: 908643 timestamp: 1718050720117 -- kind: conda - name: libstdcxx-ng - version: 13.2.0 - build: h95c4c6d_6 - build_number: 6 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h95c4c6d_6.conda - sha256: 2616dbf9d28431eea20b6e307145c6a92ea0328a047c725ff34b0316de2617da - md5: 3cfab3e709f77e9f1b3d380eb622494a +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.3-h091b4b1_0.conda + sha256: 4337f466eb55bbdc74e168b52ec8c38f598e3664244ec7a2536009036e2066cc + md5: c8c1186c7f3351f6ffddb97b1f54fc58 + depends: + - libzlib >=1.2.13,<2.0.0a0 + license: Unlicense + 
size: 824794 + timestamp: 1713367748819 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.2.0-h8f9b012_2.conda + sha256: 8f5bd92e4a24e1d35ba015c5252e8f818898478cb3bc50bd8b12ab54707dc4da + md5: a78c856b6dc6bf4ea8daeb9beaaa3fb0 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc 14.2.0 h767d61c_2 + arch: x86_64 + platform: linux license: GPL-3.0-only WITH GCC-exception-3.1 license_family: GPL - size: 3842900 - timestamp: 1713755068572 -- kind: conda - name: libuuid - version: 2.38.1 - build: h0b41bf4_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda + size: 3884556 + timestamp: 1740240685253 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda sha256: 787eb542f055a2b3de553614b25f09eefb0a0931b0c87dbcce6efdfd92f04f18 md5: 40b61aab5c7ba9ff276c41cfffe6b80b depends: @@ -585,13 +440,7 @@ packages: license_family: BSD size: 33601 timestamp: 1680112270483 -- kind: conda - name: libxcrypt - version: 4.4.36 - build: hd590300_1 - build_number: 1 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda sha256: 6ae68e0b86423ef188196fff6207ed0c8195dd84273cb5623b85aa08033a410c md5: 5aa797f8787fe7a17d1b0821485b5adc depends: @@ -599,13 +448,7 @@ packages: license: LGPL-2.1-or-later size: 100393 timestamp: 1702724383534 -- kind: conda - name: libzlib - version: 1.2.13 - build: hd590300_5 - build_number: 5 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda sha256: 370c7c5893b737596fd6ca0d9190c9715d89d888b8c88537ae1ef168c25e82e4 md5: f36c115f1ee199da648e0597ec2047ad depends: @@ -616,30 +459,7 @@ packages: license_family: Other size: 61588 timestamp: 1686575217516 -- kind: conda - name: libzlib 
- version: 1.2.13 - build: hfb2fe0b_6 - build_number: 6 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-hfb2fe0b_6.conda - sha256: 8b29a2386d99b8f58178951dcf19117b532cd9c4aa07623bf1667eae99755d32 - md5: 9c4e121cd926cab631bd1c4a61d18b17 - depends: - - __osx >=11.0 - constrains: - - zlib 1.2.13 *_6 - license: Zlib - license_family: Other - size: 46768 - timestamp: 1716874151980 -- kind: conda - name: libzlib - version: 1.3.1 - build: h87427d6_1 - build_number: 1 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.3.1-h87427d6_1.conda +- conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.3.1-h87427d6_1.conda sha256: 80a62db652b1da0ccc100812a1d86e94f75028968991bfb17f9536f3aa72d91d md5: b7575b5aa92108dcc9aaab0f05f2dbce depends: @@ -650,12 +470,18 @@ packages: license_family: Other size: 57372 timestamp: 1716874211519 -- kind: conda - name: ncurses - version: 6.4.20240210 - build: h59595ed_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4.20240210-h59595ed_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-hfb2fe0b_6.conda + sha256: 8b29a2386d99b8f58178951dcf19117b532cd9c4aa07623bf1667eae99755d32 + md5: 9c4e121cd926cab631bd1c4a61d18b17 + depends: + - __osx >=11.0 + constrains: + - zlib 1.2.13 *_6 + license: Zlib + license_family: Other + size: 46768 + timestamp: 1716874151980 +- conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4.20240210-h59595ed_0.conda sha256: aa0f005b6727aac6507317ed490f0904430584fa8ca722657e7f0fb94741de81 md5: 97da8860a0da5413c7c98a3b3838a645 depends: @@ -663,35 +489,19 @@ packages: license: X11 AND BSD-3-Clause size: 895669 timestamp: 1710866638986 -- kind: conda - name: ncurses - version: '6.5' - build: h5846eda_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.5-h5846eda_0.conda +- conda: 
https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.5-h5846eda_0.conda sha256: 6ecc73db0e49143092c0934355ac41583a5d5a48c6914c5f6ca48e562d3a4b79 md5: 02a888433d165c99bf09784a7b14d900 license: X11 AND BSD-3-Clause size: 823601 timestamp: 1715195267791 -- kind: conda - name: ncurses - version: '6.5' - build: hb89a1cb_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-hb89a1cb_0.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-hb89a1cb_0.conda sha256: 87d7cf716d9d930dab682cb57b3b8d3a61940b47d6703f3529a155c938a6990a md5: b13ad5724ac9ae98b6b4fd87e4500ba4 license: X11 AND BSD-3-Clause size: 795131 timestamp: 1715194898402 -- kind: conda - name: openssl - version: 3.2.1 - build: hd590300_1 - build_number: 1 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_1.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_1.conda sha256: 2c689444ed19a603be457284cf2115ee728a3fafb7527326e96054dee7cdc1a7 md5: 9d731343cff6ee2e5a25c4a091bf8e2a depends: @@ -703,12 +513,7 @@ packages: license_family: Apache size: 2865379 timestamp: 1710793235846 -- kind: conda - name: openssl - version: 3.3.0 - build: hd590300_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.3.0-hd590300_0.conda +- conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.3.0-hd590300_0.conda sha256: fdbf05e4db88c592366c90bb82e446edbe33c6e49e5130d51c580b2629c0b5d5 md5: c0f3abb4a16477208bbd43a39bd56f18 depends: @@ -720,50 +525,33 @@ packages: license_family: Apache size: 2895187 timestamp: 1714466138265 -- kind: conda - name: openssl - version: 3.3.0 - build: hfb2fe0b_3 - build_number: 3 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.3.0-hfb2fe0b_3.conda - sha256: 6f41c163ab57e7499dff092be4498614651f0f6432e12c2b9f06859a8bc39b75 - md5: 730f618b008b3c13c1e3f973408ddd67 +- conda: 
https://conda.anaconda.org/conda-forge/osx-64/openssl-3.3.1-h87427d6_1.conda + sha256: 60eed5d771207bcef05e0547c8f93a61d0ad1dcf75e19f8f8d9ded8094d78477 + md5: d838ffe9ec3c6d971f110e04487466ff depends: - - __osx >=11.0 + - __osx >=10.13 - ca-certificates constrains: - pyopenssl >=22.1 license: Apache-2.0 license_family: Apache - size: 2893954 - timestamp: 1716468329572 -- kind: conda - name: openssl - version: 3.3.1 - build: h87427d6_1 - build_number: 1 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.3.1-h87427d6_1.conda - sha256: 60eed5d771207bcef05e0547c8f93a61d0ad1dcf75e19f8f8d9ded8094d78477 - md5: d838ffe9ec3c6d971f110e04487466ff + size: 2551950 + timestamp: 1719364820943 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.3.0-hfb2fe0b_3.conda + sha256: 6f41c163ab57e7499dff092be4498614651f0f6432e12c2b9f06859a8bc39b75 + md5: 730f618b008b3c13c1e3f973408ddd67 depends: - - __osx >=10.13 + - __osx >=11.0 - ca-certificates constrains: - pyopenssl >=22.1 license: Apache-2.0 license_family: Apache - size: 2551950 - timestamp: 1719364820943 -- kind: conda - name: python - version: 3.9.19 - build: h0755675_0_cpython - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/python-3.9.19-h0755675_0_cpython.conda - sha256: b9253ca9ca5427e6da4b1d43353a110e0f2edfab9c951afb4bf01cbae2825b31 - md5: d9ee3647fbd9e8595b8df759b2bbefb8 + size: 2893954 + timestamp: 1716468329572 +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.14-hd12c33a_0_cpython.conda + sha256: 76a5d12e73542678b70a94570f7b0f7763f9a938f77f0e75d9ea615ef22aa84c + md5: 2b4ba962994e8bd4be9ff5b64b75aff2 depends: - bzip2 >=1.0.8,<2.0a0 - ld_impl_linux-64 >=2.36.1 @@ -781,23 +569,24 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.9.* *_cp39 + - python_abi 3.10.* *_cp310 license: Python-2.0 - size: 23800555 - timestamp: 1710940120866 -- kind: conda - name: python - version: 3.9.19 - build: h7a9c478_0_cpython - 
subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/python-3.9.19-h7a9c478_0_cpython.conda - sha256: 58b76be84683bc03112b3ed7e377e99af24844ebf7d7568f6466a2dae7a887fe - md5: 7d53d366acd9dbfb498c69326ccb520a + size: 25517742 + timestamp: 1710939725109 +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.11.9-hb806964_0_cpython.conda + sha256: 177f33a1fb8d3476b38f73c37b42f01c0b014fa0e039a701fd9f83d83aae6d40 + md5: ac68acfa8b558ed406c75e98d3428d7b depends: - bzip2 >=1.0.8,<2.0a0 + - ld_impl_linux-64 >=2.36.1 + - libexpat >=2.6.2,<3.0a0 - libffi >=3.4,<4.0a0 - - libsqlite >=3.45.2,<4.0a0 - - libzlib >=1.2.13,<2.0.0a0 + - libgcc-ng >=12 + - libnsl >=2.0.1,<2.1.0a0 + - libsqlite >=3.45.3,<4.0a0 + - libuuid >=2.38.1,<3.0a0 + - libxcrypt >=4.4.36 + - libzlib >=1.2.13,<1.3.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 - readline >=8.2,<9.0a0 @@ -805,23 +594,23 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.9.* *_cp39 + - python_abi 3.11.* *_cp311 license: Python-2.0 - size: 12372436 - timestamp: 1710940037648 -- kind: conda - name: python - version: 3.9.19 - build: hd7ebdb9_0_cpython - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.9.19-hd7ebdb9_0_cpython.conda - sha256: 3b93f7a405f334043758dfa8aaca050429a954a37721a6462ebd20e94ef7c5a0 - md5: 45c4d173b12154f746be3b49b1190634 + size: 30884494 + timestamp: 1713553104915 +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.9.19-h0755675_0_cpython.conda + sha256: b9253ca9ca5427e6da4b1d43353a110e0f2edfab9c951afb4bf01cbae2825b31 + md5: d9ee3647fbd9e8595b8df759b2bbefb8 depends: - bzip2 >=1.0.8,<2.0a0 + - ld_impl_linux-64 >=2.36.1 - libffi >=3.4,<4.0a0 + - libgcc-ng >=12 + - libnsl >=2.0.1,<2.1.0a0 - libsqlite >=3.45.2,<4.0a0 - - libzlib >=1.2.13,<2.0.0a0 + - libuuid >=2.38.1,<3.0a0 + - libxcrypt >=4.4.36 + - libzlib >=1.2.13,<1.3.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 - readline >=8.2,<9.0a0 @@ 
-831,14 +620,9 @@ packages: constrains: - python_abi 3.9.* *_cp39 license: Python-2.0 - size: 11847835 - timestamp: 1710939779164 -- kind: conda - name: python - version: 3.10.14 - build: h00d2728_0_cpython - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.14-h00d2728_0_cpython.conda + size: 23800555 + timestamp: 1710940120866 +- conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.14-h00d2728_0_cpython.conda sha256: 00c1de2d46ede26609ef4e84a44b83be7876ba6a0215b7c83bff41a0656bf694 md5: 0a1cddc4382c5c171e791c70740546dd depends: @@ -857,18 +641,15 @@ packages: license: Python-2.0 size: 11890228 timestamp: 1710940046031 -- kind: conda - name: python - version: 3.10.14 - build: h2469fbe_0_cpython - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.14-h2469fbe_0_cpython.conda - sha256: 454d609fe25daedce9e886efcbfcadad103ed0362e7cb6d2bcddec90b1ecd3ee - md5: 4ae999c8227c6d8c7623d32d51d25ea9 +- conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.11.9-h657bba9_0_cpython.conda + sha256: 3b50a5abb3b812875beaa9ab792dbd1bf44f335c64e9f9fedcf92d953995651c + md5: 612763bc5ede9552e4233ec518b9c9fb depends: + - __osx >=10.9 - bzip2 >=1.0.8,<2.0a0 + - libexpat >=2.6.2,<3.0a0 - libffi >=3.4,<4.0a0 - - libsqlite >=3.45.2,<4.0a0 + - libsqlite >=3.45.3,<4.0a0 - libzlib >=1.2.13,<2.0.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 @@ -877,28 +658,18 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.10.* *_cp310 + - python_abi 3.11.* *_cp311 license: Python-2.0 - size: 12336005 - timestamp: 1710939659384 -- kind: conda - name: python - version: 3.10.14 - build: hd12c33a_0_cpython - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.14-hd12c33a_0_cpython.conda - sha256: 76a5d12e73542678b70a94570f7b0f7763f9a938f77f0e75d9ea615ef22aa84c - md5: 2b4ba962994e8bd4be9ff5b64b75aff2 + size: 15503226 + timestamp: 1713553747073 +- conda: 
https://conda.anaconda.org/conda-forge/osx-64/python-3.9.19-h7a9c478_0_cpython.conda + sha256: 58b76be84683bc03112b3ed7e377e99af24844ebf7d7568f6466a2dae7a887fe + md5: 7d53d366acd9dbfb498c69326ccb520a depends: - bzip2 >=1.0.8,<2.0a0 - - ld_impl_linux-64 >=2.36.1 - libffi >=3.4,<4.0a0 - - libgcc-ng >=12 - - libnsl >=2.0.1,<2.1.0a0 - libsqlite >=3.45.2,<4.0a0 - - libuuid >=2.38.1,<3.0a0 - - libxcrypt >=4.4.36 - - libzlib >=1.2.13,<1.3.0a0 + - libzlib >=1.2.13,<2.0.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 - readline >=8.2,<9.0a0 @@ -906,24 +677,17 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.10.* *_cp310 + - python_abi 3.9.* *_cp39 license: Python-2.0 - size: 25517742 - timestamp: 1710939725109 -- kind: conda - name: python - version: 3.11.9 - build: h657bba9_0_cpython - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/python-3.11.9-h657bba9_0_cpython.conda - sha256: 3b50a5abb3b812875beaa9ab792dbd1bf44f335c64e9f9fedcf92d953995651c - md5: 612763bc5ede9552e4233ec518b9c9fb + size: 12372436 + timestamp: 1710940037648 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.14-h2469fbe_0_cpython.conda + sha256: 454d609fe25daedce9e886efcbfcadad103ed0362e7cb6d2bcddec90b1ecd3ee + md5: 4ae999c8227c6d8c7623d32d51d25ea9 depends: - - __osx >=10.9 - bzip2 >=1.0.8,<2.0a0 - - libexpat >=2.6.2,<3.0a0 - libffi >=3.4,<4.0a0 - - libsqlite >=3.45.3,<4.0a0 + - libsqlite >=3.45.2,<4.0a0 - libzlib >=1.2.13,<2.0.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 @@ -932,16 +696,11 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.11.* *_cp311 + - python_abi 3.10.* *_cp310 license: Python-2.0 - size: 15503226 - timestamp: 1713553747073 -- kind: conda - name: python - version: 3.11.9 - build: h932a869_0_cpython - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.11.9-h932a869_0_cpython.conda + size: 12336005 + timestamp: 1710939659384 +- conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/python-3.11.9-h932a869_0_cpython.conda sha256: a436ceabde1f056a0ac3e347dadc780ee2a135a421ddb6e9a469370769829e3c md5: 293e0713ae804b5527a673e7605c04fc depends: @@ -962,25 +721,14 @@ packages: license: Python-2.0 size: 14644189 timestamp: 1713552154779 -- kind: conda - name: python - version: 3.11.9 - build: hb806964_0_cpython - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/python-3.11.9-hb806964_0_cpython.conda - sha256: 177f33a1fb8d3476b38f73c37b42f01c0b014fa0e039a701fd9f83d83aae6d40 - md5: ac68acfa8b558ed406c75e98d3428d7b +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.9.19-hd7ebdb9_0_cpython.conda + sha256: 3b93f7a405f334043758dfa8aaca050429a954a37721a6462ebd20e94ef7c5a0 + md5: 45c4d173b12154f746be3b49b1190634 depends: - bzip2 >=1.0.8,<2.0a0 - - ld_impl_linux-64 >=2.36.1 - - libexpat >=2.6.2,<3.0a0 - libffi >=3.4,<4.0a0 - - libgcc-ng >=12 - - libnsl >=2.0.1,<2.1.0a0 - - libsqlite >=3.45.3,<4.0a0 - - libuuid >=2.38.1,<3.0a0 - - libxcrypt >=4.4.36 - - libzlib >=1.2.13,<1.3.0a0 + - libsqlite >=3.45.2,<4.0a0 + - libzlib >=1.2.13,<2.0.0a0 - ncurses >=6.4.20240210,<7.0a0 - openssl >=3.2.1,<4.0a0 - readline >=8.2,<9.0a0 @@ -988,17 +736,11 @@ packages: - tzdata - xz >=5.2.6,<6.0a0 constrains: - - python_abi 3.11.* *_cp311 + - python_abi 3.9.* *_cp39 license: Python-2.0 - size: 30884494 - timestamp: 1713553104915 -- kind: conda - name: readline - version: '8.2' - build: h8228510_1 - build_number: 1 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda + size: 11847835 + timestamp: 1710939779164 +- conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda sha256: 5435cf39d039387fbdc977b0a762357ea909a7694d9528ab40f005e9208744d7 md5: 47d31b792659ce70f470b5c82fdfb7a4 depends: @@ -1008,13 +750,16 @@ packages: license_family: GPL size: 281456 timestamp: 1679532220005 -- kind: conda - name: readline - version: 
'8.2' - build: h92ec313_1 - build_number: 1 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda +- conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda + sha256: 41e7d30a097d9b060037f0c6a2b1d4c4ae7e942c06c943d23f9d481548478568 + md5: f17f77f2acf4d344734bda76829ce14e + depends: + - ncurses >=6.3,<7.0a0 + license: GPL-3.0-only + license_family: GPL + size: 255870 + timestamp: 1679532707590 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda sha256: a1dfa679ac3f6007362386576a704ad2d0d7a02e98f5d0b115f207a2da63e884 md5: 8cbb776a2f641b943d413b3e19df71f4 depends: @@ -1023,28 +768,17 @@ packages: license_family: GPL size: 250351 timestamp: 1679532511311 -- kind: conda - name: readline - version: '8.2' - build: h9e318b2_1 - build_number: 1 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda - sha256: 41e7d30a097d9b060037f0c6a2b1d4c4ae7e942c06c943d23f9d481548478568 - md5: f17f77f2acf4d344734bda76829ce14e +- conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda + sha256: e0569c9caa68bf476bead1bed3d79650bb080b532c64a4af7d8ca286c08dea4e + md5: d453b98d9c83e71da0741bb0ff4d76bc depends: - - ncurses >=6.3,<7.0a0 - license: GPL-3.0-only - license_family: GPL - size: 255870 - timestamp: 1679532707590 -- kind: conda - name: tk - version: 8.6.13 - build: h1abcd95_1 - build_number: 1 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda + - libgcc-ng >=12 + - libzlib >=1.2.13,<1.3.0a0 + license: TCL + license_family: BSD + size: 3318875 + timestamp: 1699202167581 +- conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda sha256: 30412b2e9de4ff82d8c2a7e5d06a15f4f4fef1809a72138b6ccb53a33b26faf5 md5: bf830ba5afc507c6232d4ef0fb1a882d depends: @@ -1053,13 +787,7 @@ packages: license_family: BSD size: 3270220 timestamp: 
1699202389792 -- kind: conda - name: tk - version: 8.6.13 - build: h5083fa2_1 - build_number: 1 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda sha256: 72457ad031b4c048e5891f3f6cb27a53cb479db68a52d965f796910e71a403a8 md5: b50a57ba89c32b62428b71a875291c9b depends: @@ -1068,86 +796,53 @@ packages: license_family: BSD size: 3145523 timestamp: 1699202432999 -- kind: conda - name: tk - version: 8.6.13 - build: noxft_h4845f30_101 - build_number: 101 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda - sha256: e0569c9caa68bf476bead1bed3d79650bb080b532c64a4af7d8ca286c08dea4e - md5: d453b98d9c83e71da0741bb0ff4d76bc - depends: - - libgcc-ng >=12 - - libzlib >=1.2.13,<1.3.0a0 - license: TCL - license_family: BSD - size: 3318875 - timestamp: 1699202167581 -- kind: conda - name: tzdata - version: 2024a - build: h0c530f3_0 - subdir: noarch - noarch: generic - url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda +- conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda sha256: 7b2b69c54ec62a243eb6fba2391b5e443421608c3ae5dbff938ad33ca8db5122 md5: 161081fc7cec0bfda0d86d7cb595f8d8 license: LicenseRef-Public-Domain size: 119815 timestamp: 1706886945727 -- kind: conda - name: uv - version: 0.1.39 - build: h0ea3d13_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/uv-0.1.39-h0ea3d13_0.conda - sha256: 763d149b6f4f5c70c91e4106d3a48409c48283ed2e27392578998fb2441f23d8 - md5: c3206e7ca254e50b3556917886f9b12b +- conda: https://conda.anaconda.org/conda-forge/linux-64/uv-0.6.3-h0f3a69f_0.conda + sha256: fc33719d8cccf555748c2cb17bede5c0c06637269a0be3979f0eaebcca9f4eb0 + md5: bfee7af0ca5d4b0397bbd9ddf386d14b depends: - - libgcc-ng >=12 - - libstdcxx-ng >=12 + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + - libstdcxx >=13 + 
constrains: + - __glibc >=2.17 + arch: x86_64 + platform: linux license: Apache-2.0 OR MIT - size: 11891252 - timestamp: 1714233659570 -- kind: conda - name: uv - version: 0.1.45 - build: h4e38c46_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/uv-0.1.45-h4e38c46_0.conda - sha256: 8c11774ca1940dcd90187ce240afea26b76e2942f9b18d65f6d4b483534193fd - md5: 754ce8a22c94a30c7bbd42274c7fae31 + size: 11471064 + timestamp: 1740442105821 +- conda: https://conda.anaconda.org/conda-forge/osx-64/uv-0.6.3-h8de1528_0.conda + sha256: e61ed82bb71264dc7dcf9ca1528796907b515ceec508d5c6f4d6b79e1716e0ea + md5: 861adce9aeb74e0124187afbde2f4d4a depends: - __osx >=10.13 - - libcxx >=16 + - libcxx >=18 constrains: - - __osx >=10.12 + - __osx >=10.13 + arch: x86_64 + platform: osx license: Apache-2.0 OR MIT - size: 8937335 - timestamp: 1716265195083 -- kind: conda - name: uv - version: 0.1.45 - build: hc069d6b_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.1.45-hc069d6b_0.conda - sha256: 80dfc19f2ef473e86e718361847d1d598e95ffd0c0f5de7d07cda35d25f6aef5 - md5: 9192238a60bc6da9c41092990c31eb41 + size: 11024081 + timestamp: 1740443179556 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/uv-0.6.3-h668ec48_0.conda + sha256: b1ada7b2d26f82effe5dfddfe31f5674a39196d6ddca40f02cfd23390d5446f0 + md5: 0a3cd436a7e106362489ae2ff09db1c4 depends: - __osx >=11.0 - - libcxx >=16 + - libcxx >=18 constrains: - __osx >=11.0 + arch: arm64 + platform: osx license: Apache-2.0 OR MIT - size: 9231858 - timestamp: 1716265232676 -- kind: conda - name: xz - version: 5.2.6 - build: h166bdaf_0 - subdir: linux-64 - url: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 + size: 9968257 + timestamp: 1740443196241 +- conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 sha256: 03a6d28ded42af8a347345f82f3eebdd6807a08526d47899a42d62d319609162 md5: 2161070d867d1b1204ea749c8eec4ef0 depends: @@ -1155,25 
+850,15 @@ packages: license: LGPL-2.1 and GPL-2.0 size: 418368 timestamp: 1660346797927 -- kind: conda - name: xz - version: 5.2.6 - build: h57fd34a_0 - subdir: osx-arm64 - url: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 - sha256: 59d78af0c3e071021cfe82dc40134c19dab8cdf804324b62940f5c8cd71803ec - md5: 39c6b54e94014701dd157f4f576ed211 - license: LGPL-2.1 and GPL-2.0 - size: 235693 - timestamp: 1660346961024 -- kind: conda - name: xz - version: 5.2.6 - build: h775f41a_0 - subdir: osx-64 - url: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 +- conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2 sha256: eb09823f34cc2dd663c0ec4ab13f246f45dcd52e5b8c47b9864361de5204a1c8 md5: a72f9d4ea13d55d745ff1ed594747f10 license: LGPL-2.1 and GPL-2.0 size: 238119 timestamp: 1660346964847 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 + sha256: 59d78af0c3e071021cfe82dc40134c19dab8cdf804324b62940f5c8cd71803ec + md5: 39c6b54e94014701dd157f4f576ed211 + license: LGPL-2.1 and GPL-2.0 + size: 235693 + timestamp: 1660346961024 diff --git a/infra/scripts/pixi/pixi.toml b/infra/scripts/pixi/pixi.toml index 487c6f7def1..89b9f0376f8 100644 --- a/infra/scripts/pixi/pixi.toml +++ b/infra/scripts/pixi/pixi.toml @@ -6,7 +6,7 @@ platforms = ["linux-64", "osx-arm64", "osx-64"] [tasks] [dependencies] -uv = ">=0.1.39,<0.2" +uv = ">=0.6.3" [feature.py39.dependencies] python = "~=3.9.0" diff --git a/infra/scripts/release/files_to_bump.txt b/infra/scripts/release/files_to_bump.txt index 652bc3cad10..71cf1746b6d 100644 --- a/infra/scripts/release/files_to_bump.txt +++ b/infra/scripts/release/files_to_bump.txt @@ -14,6 +14,9 @@ infra/feast-helm-operator/Makefile 6 infra/feast-helm-operator/config/manager/kustomization.yaml 8 infra/feast-operator/Makefile 6 infra/feast-operator/config/manager/kustomization.yaml 8 +infra/feast-operator/config/component_metadata.yaml 4 
+infra/feast-operator/config/overlays/odh/params.env 1 2 infra/feast-operator/api/feastversion/version.go 20 java/pom.xml 38 +sdk/python/feast/infra/feature_servers/multicloud/requirements.txt 2 ui/package.json 3 diff --git a/java/datatypes/pom.xml b/java/datatypes/pom.xml index b0ba049c575..967262d0e01 100644 --- a/java/datatypes/pom.xml +++ b/java/datatypes/pom.xml @@ -118,6 +118,11 @@ grpc-stub ${grpc.version} + + io.grpc + grpc-api + ${grpc.version} + javax.annotation javax.annotation-api diff --git a/java/pom.xml b/java/pom.xml index 416ebe59786..d7076ef501e 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -35,15 +35,15 @@ - 0.41.0 + 0.46.0 https://github.com/feast-dev/feast UTF-8 UTF-8 - 1.30.2 + 1.63.0 3.12.2 - 3.16.1 + 3.25.5 1.111.1 0.8.0 1.9.10 @@ -61,15 +61,15 @@ 1.5.24 3.14.7 3.10 - 2.14.0 + 2.15.0 2.3.1 1.3.2 2.0.1.Final 0.21.0 1.6.6 - 30.1-jre + 32.0.0-jre 3.4.34 - 4.1.101.Final + 4.1.96.Final src/main/java/**/BatchLoadsWithResult.java - + @@ -365,7 +365,7 @@ [11.0,) - + @@ -376,7 +376,7 @@ - + diff --git a/java/serving-client/pom.xml b/java/serving-client/pom.xml index 7b8838a009c..dc611b4a76e 100644 --- a/java/serving-client/pom.xml +++ b/java/serving-client/pom.xml @@ -50,6 +50,11 @@ grpc-testing ${grpc.version} + + io.grpc + grpc-api + ${grpc.version} + com.google.protobuf protobuf-java-util diff --git a/java/serving/pom.xml b/java/serving/pom.xml index ca7f8a73b5f..1be4da1b622 100644 --- a/java/serving/pom.xml +++ b/java/serving/pom.xml @@ -126,7 +126,7 @@ com.azure azure-storage-blob - 12.25.2 + 12.26.1 com.azure @@ -164,6 +164,11 @@ grpc-stub ${grpc.version} + + io.grpc + grpc-api + ${grpc.version} + io.grpc grpc-netty-shaded @@ -192,7 +197,7 @@ io.jaegertracing jaeger-client - 1.3.2 + 1.8.1 io.opentracing @@ -240,7 +245,7 @@ com.google.cloud google-cloud-storage - 1.118.0 + 2.43.1 @@ -253,13 +258,13 @@ com.amazonaws aws-java-sdk-s3 - 1.12.261 + 1.12.546 com.amazonaws aws-java-sdk-sts - 1.12.476 + 1.12.546 @@ -378,7 +383,7 @@ io.lettuce 
lettuce-core - 6.0.2.RELEASE + 6.5.1.RELEASE org.apache.commons diff --git a/protos/feast/core/DataSource.proto b/protos/feast/core/DataSource.proto index d129086f451..9c31851823d 100644 --- a/protos/feast/core/DataSource.proto +++ b/protos/feast/core/DataSource.proto @@ -268,3 +268,7 @@ message DataSource { AthenaOptions athena_options = 35; } } + +message DataSourceList { + repeated DataSource datasources = 1; +} \ No newline at end of file diff --git a/protos/feast/core/Entity.proto b/protos/feast/core/Entity.proto index d8d8bedc5eb..915402804fc 100644 --- a/protos/feast/core/Entity.proto +++ b/protos/feast/core/Entity.proto @@ -58,3 +58,7 @@ message EntityMeta { google.protobuf.Timestamp created_timestamp = 1; google.protobuf.Timestamp last_updated_timestamp = 2; } + +message EntityList { + repeated Entity entities = 1; +} diff --git a/protos/feast/core/Feature.proto b/protos/feast/core/Feature.proto index 882de47eb9c..8a56d67905a 100644 --- a/protos/feast/core/Feature.proto +++ b/protos/feast/core/Feature.proto @@ -35,6 +35,11 @@ message FeatureSpecV2 { map tags = 3; // Description of the feature. - string description = 4; + + // Field indicating the vector will be indexed for vector similarity search + bool vector_index = 5; + + // Metric used for vector similarity search. 
+ string vector_search_metric = 6; } diff --git a/protos/feast/core/FeatureService.proto b/protos/feast/core/FeatureService.proto index 80d32eb4dec..380b2dc3718 100644 --- a/protos/feast/core/FeatureService.proto +++ b/protos/feast/core/FeatureService.proto @@ -61,6 +61,7 @@ message LoggingConfig { SnowflakeDestination snowflake_destination = 6; CustomDestination custom_destination = 7; AthenaDestination athena_destination = 8; + CouchbaseColumnarDestination couchbase_columnar_destination = 9; } message FileDestination { @@ -95,4 +96,17 @@ message LoggingConfig { string kind = 1; map config = 2; } + + message CouchbaseColumnarDestination { + // Destination database name + string database = 1; + // Destination scope name + string scope = 2; + // Destination collection name + string collection = 3; + } } + +message FeatureServiceList { + repeated FeatureService featureservices = 1; +} \ No newline at end of file diff --git a/protos/feast/core/FeatureView.proto b/protos/feast/core/FeatureView.proto index c9e38bf3448..3e9aa17256f 100644 --- a/protos/feast/core/FeatureView.proto +++ b/protos/feast/core/FeatureView.proto @@ -92,3 +92,7 @@ message MaterializationInterval { google.protobuf.Timestamp start_time = 1; google.protobuf.Timestamp end_time = 2; } + +message FeatureViewList { + repeated FeatureView featureviews = 1; +} diff --git a/protos/feast/core/OnDemandFeatureView.proto b/protos/feast/core/OnDemandFeatureView.proto index 65e8018473e..3ed8ffe4aed 100644 --- a/protos/feast/core/OnDemandFeatureView.proto +++ b/protos/feast/core/OnDemandFeatureView.proto @@ -101,3 +101,7 @@ message UserDefinedFunction { // The string representation of the udf string body_text = 3; } + +message OnDemandFeatureViewList { + repeated OnDemandFeatureView ondemandfeatureviews = 1; +} \ No newline at end of file diff --git a/protos/feast/registry/RegistryServer.proto b/protos/feast/registry/RegistryServer.proto index 6685bc0baa1..fb68d519dd9 100644 --- 
a/protos/feast/registry/RegistryServer.proto +++ b/protos/feast/registry/RegistryServer.proto @@ -17,6 +17,8 @@ import "feast/core/InfraObject.proto"; import "feast/core/Permission.proto"; import "feast/core/Project.proto"; +option go_package = "github.com/feast-dev/feast/go/protos/feast/registry"; + service RegistryServer{ // Entity RPCs rpc ApplyEntity (ApplyEntityRequest) returns (google.protobuf.Empty) {} diff --git a/protos/feast/serving/GrpcServer.proto b/protos/feast/serving/GrpcServer.proto index 34edb4ebe9c..b30e1e9d74d 100644 --- a/protos/feast/serving/GrpcServer.proto +++ b/protos/feast/serving/GrpcServer.proto @@ -2,6 +2,8 @@ syntax = "proto3"; import "feast/serving/ServingService.proto"; +option go_package = "github.com/feast-dev/feast/go/protos/feast/serving"; + message PushRequest { map features = 1; string stream_feature_view = 2; diff --git a/pyproject.toml b/pyproject.toml index 2a051231e2a..9eec118099a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,170 @@ +[project] +name = "feast" +description = "Python SDK for Feast" +readme = "README.md" +requires-python = ">=3.9.0" +license = {file = "LICENSE"} +classifiers = [ + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9" +] +dynamic = [ + "version", +] +dependencies = [ + "click>=7.0.0,<9.0.0", + "colorama>=0.3.9,<1", + "dill~=0.3.0", + "protobuf>=4.24.0", + "Jinja2>=2,<4", + "jsonschema", + "mmh3", + "numpy>=1.22,<2", + "pandas>=1.4.3,<3", + "pyarrow<18.1.0", + "pydantic>=2.0.0", + "pygments>=2.12.0,<3", + "PyYAML>=5.4.0,<7", + "requests", + "SQLAlchemy[mypy]>1", + "tabulate>=0.8.0,<1", + "tenacity>=7,<9", + "toml>=0.10.0,<1", + "tqdm>=4,<5", + "typeguard>=4.0.0", + "fastapi>=0.68.0", + "uvicorn[standard]>=0.14.0,<1", + "uvicorn-worker", + "gunicorn; platform_system != 'Windows'", + "dask[dataframe]>=2024.2.1", + "prometheus_client", + "psutil", + "bigtree>=0.19.2", 
+ "pyjwt", +] + +[project.optional-dependencies] +aws = ["boto3>=1.17.0,<2", "fsspec<=2024.9.0", "aiobotocore>2,<3"] +azure = [ + "azure-storage-blob>=0.37.0", + "azure-identity>=1.6.1", + "SQLAlchemy>=1.4.19", + "pyodbc>=4.0.30", + "pymssql" +] +cassandra = ["cassandra-driver>=3.24.0,<4"] +couchbase = ["couchbase==4.3.2", "couchbase-columnar==1.0.0"] +delta = ["deltalake"] +docling = ["docling>=2.23.0"] +duckdb = ["ibis-framework[duckdb]>=9.0.0,<10"] +elasticsearch = ["elasticsearch>=8.13.0"] +faiss = ["faiss-cpu>=1.7.0,<2"] +gcp = [ + "google-api-core>=1.23.0,<3", + "googleapis-common-protos>=1.52.0,<2", + "google-cloud-bigquery[pandas]>=2,<4", + "google-cloud-bigquery-storage >= 2.0.0,<3", + "google-cloud-datastore>=2.16.0,<3", + "google-cloud-storage>=1.34.0,<3", + "google-cloud-bigtable>=2.11.0,<3", + "fsspec<=2024.9.0", +] +ge = ["great_expectations>=0.15.41,<1"] +go = ["cffi>=1.15.0"] +grpcio = [ + "grpcio>=1.56.2,<2", + "grpcio-reflection>=1.56.2,<2", + "grpcio-health-checking>=1.56.2,<2", +] +hazelcast = ["hazelcast-python-client>=5.1"] +hbase = ["happybase>=1.2.0,<3"] +ibis = [ + "ibis-framework>=9.0.0,<10", + "ibis-substrait>=4.0.0", +] +ikv = [ + "ikvpy>=0.0.36", +] +k8s = ["kubernetes<=20.13.0"] +milvus = ["pymilvus"] +mssql = ["ibis-framework[mssql]>=9.0.0,<10"] +mysql = ["pymysql", "types-PyMySQL"] +opentelemetry = ["prometheus_client", "psutil"] +spark = ["pyspark>=3.0.0,<4"] +trino = ["trino>=0.305.0,<0.400.0", "regex"] +postgres = ["psycopg[binary,pool]>=3.0.0,<4"] +pytorch = ["torch>=2.2.2", "torchvision>=0.17.2"] +qdrant = ["qdrant-client>=1.12.0"] +redis = [ + "redis>=4.2.2,<5", + "hiredis>=2.0.0,<3", +] +singlestore = ["singlestoredb<1.8.0"] +snowflake = [ + "snowflake-connector-python[pandas]>=3.7,<4", +] +sqlite_vec = ["sqlite-vec==v0.1.6"] + +ci = [ + "build", + "virtualenv==20.23.0", + "cryptography>=43.0,<44", + "ruff>=0.8.0", + "mypy-protobuf>=3.1", + "grpcio-tools>=1.56.2,<2", + "grpcio-testing>=1.56.2,<2", + # FastAPI does not 
correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656). + "httpx==0.27.2", + "minio==7.2.11", + "mock==2.0.0", + "moto<5", + "mypy>=1.4.1,<1.11.3", + "urllib3>=1.25.4,<3", + "psutil==5.9.0", + "py>=1.11.0", # https://github.com/pytest-dev/pytest/issues/10420 + "pytest>=6.0.0,<8", + "pytest-asyncio<=0.24.0", + "pytest-cov", + "pytest-xdist", + "pytest-benchmark>=3.4.1,<4", + "pytest-lazy-fixture==0.6.3", + "pytest-timeout==1.4.2", + "pytest-ordering~=0.6.0", + "pytest-mock==1.10.4", + "pytest-env", + "Sphinx>4.0.0,<7", + "testcontainers==4.8.2", + "python-keycloak==4.2.2", + "pre-commit<3.3.2", + "assertpy==1.1", + "pip-tools", + "pybindgen", + "types-protobuf~=3.19.22", + "types-python-dateutil", + "types-pytz", + "types-PyYAML", + "types-redis", + "types-requests<2.31.0", + "types-setuptools", + "types-tabulate", + "virtualenv<20.24.2", + "feast[aws, azure, cassandra, couchbase, delta, docling, duckdb, elasticsearch, faiss, gcp, ge, go, grpcio, hazelcast, hbase, ibis, ikv, k8s, milvus, mssql, mysql, opentelemetry, spark, trino, postgres, pytorch, qdrant, redis, singlestore, snowflake, sqlite_vec]" +] +nlp = ["feast[docling, milvus, pytorch]"] +dev = ["feast[ci]"] +docs = ["feast[ci]"] + +[project.urls] +Homepage = "https://github.com/feast-dev/feast" + +[[project.authors]] +name = "Feast" + +[project.scripts] +feast = "feast.cli:cli" + [build-system] requires = [ "pybindgen==0.22.0", @@ -7,8 +174,15 @@ requires = [ "wheel", ] +[tool.setuptools] +packages = {find = {where = ["sdk/python"], exclude = ["java", "infra", "sdk/python/tests", "ui"]}} + [tool.setuptools_scm] -# Including this section is comparable to supplying use_scm_version=True in setup.py. +# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm. 
+# Regex modified from default tag regex in: +# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9 +tag_regex = "^(?:[\\/\\w-]+)?(?P[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$" + [tool.ruff] line-length = 88 diff --git a/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.rst b/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.rst new file mode 100644 index 00000000000..7104b02bb66 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.rst @@ -0,0 +1,37 @@ +feast.infra.offline\_stores.contrib.couchbase\_offline\_store package +===================================================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + feast.infra.offline_stores.contrib.couchbase_offline_store.tests + +Submodules +---------- + +feast.infra.offline\_stores.contrib.couchbase\_offline\_store.couchbase module +------------------------------------------------------------------------------ + +.. automodule:: feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase + :members: + :undoc-members: + :show-inheritance: + +feast.infra.offline\_stores.contrib.couchbase\_offline\_store.couchbase\_source module +-------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.offline_stores.contrib.couchbase_offline_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.tests.rst b/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.tests.rst new file mode 100644 index 00000000000..41566b5359a --- /dev/null +++ b/sdk/python/docs/source/feast.infra.offline_stores.contrib.couchbase_offline_store.tests.rst @@ -0,0 +1,21 @@ +feast.infra.offline\_stores.contrib.couchbase\_offline\_store.tests package +=========================================================================== + +Submodules +---------- + +feast.infra.offline\_stores.contrib.couchbase\_offline\_store.tests.data\_source module +--------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.offline_stores.contrib.couchbase_offline_store.tests.data_source + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.offline_stores.contrib.couchbase_offline_store.tests + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.offline_stores.contrib.rst b/sdk/python/docs/source/feast.infra.offline_stores.contrib.rst index ec74ddab05c..61e797bd6a9 100644 --- a/sdk/python/docs/source/feast.infra.offline_stores.contrib.rst +++ b/sdk/python/docs/source/feast.infra.offline_stores.contrib.rst @@ -8,6 +8,7 @@ Subpackages :maxdepth: 4 feast.infra.offline_stores.contrib.athena_offline_store + feast.infra.offline_stores.contrib.couchbase_offline_store feast.infra.offline_stores.contrib.mssql_offline_store feast.infra.offline_stores.contrib.postgres_offline_store feast.infra.offline_stores.contrib.spark_offline_store @@ -24,6 +25,14 @@ feast.infra.offline\_stores.contrib.athena\_repo\_configuration module :undoc-members: :show-inheritance: +feast.infra.offline\_stores.contrib.couchbase\_columnar\_repo\_configuration module +----------------------------------------------------------------------------------- + +.. 
automodule:: feast.infra.offline_stores.contrib.couchbase_columnar_repo_configuration + :members: + :undoc-members: + :show-inheritance: + feast.infra.offline\_stores.contrib.mssql\_repo\_configuration module --------------------------------------------------------------------- diff --git a/sdk/python/docs/source/feast.infra.online_stores.cassandra_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.cassandra_online_store.rst new file mode 100644 index 00000000000..7c5c3d371a7 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.cassandra_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.cassandra\_online\_store package +=========================================================== + +Submodules +---------- + +feast.infra.online\_stores.cassandra\_online\_store.cassandra\_online\_store module +----------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.cassandra_online_store.cassandra_online_store + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.cassandra\_online\_store.cassandra\_repo\_configuration module +----------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.cassandra_online_store.cassandra_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.online_stores.cassandra_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.couchbase_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.couchbase_online_store.rst new file mode 100644 index 00000000000..29d51304928 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.couchbase_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.couchbase\_online\_store package +=========================================================== + +Submodules +---------- + +feast.infra.online\_stores.couchbase\_online\_store.couchbase module +-------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.couchbase_online_store.couchbase + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.couchbase\_online\_store.couchbase\_repo\_configuration module +----------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.couchbase_online_store.couchbase_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.online_stores.couchbase_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.elasticsearch_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.elasticsearch_online_store.rst new file mode 100644 index 00000000000..d470e3301d0 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.elasticsearch_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.elasticsearch\_online\_store package +=============================================================== + +Submodules +---------- + +feast.infra.online\_stores.elasticsearch\_online\_store.elasticsearch module +---------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.elasticsearch_online_store.elasticsearch + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.elasticsearch\_online\_store.elasticsearch\_repo\_configuration module +------------------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.elasticsearch_online_store.elasticsearch_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.online_stores.elasticsearch_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.hazelcast_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.hazelcast_online_store.rst new file mode 100644 index 00000000000..9cb565ca132 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.hazelcast_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.hazelcast\_online\_store package +=========================================================== + +Submodules +---------- + +feast.infra.online\_stores.hazelcast\_online\_store.hazelcast\_online\_store module +----------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.hazelcast_online_store.hazelcast_online_store + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.hazelcast\_online\_store.hazelcast\_repo\_configuration module +----------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.hazelcast_online_store.hazelcast_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.hazelcast_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.hbase_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.hbase_online_store.rst new file mode 100644 index 00000000000..50ad80e0a9e --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.hbase_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.hbase\_online\_store package +======================================================= + +Submodules +---------- + +feast.infra.online\_stores.hbase\_online\_store.hbase module +------------------------------------------------------------ + +.. 
automodule:: feast.infra.online_stores.hbase_online_store.hbase + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.hbase\_online\_store.hbase\_repo\_configuration module +--------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.hbase_online_store.hbase_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.hbase_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.ikv_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.ikv_online_store.rst new file mode 100644 index 00000000000..391af17024f --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.ikv_online_store.rst @@ -0,0 +1,21 @@ +feast.infra.online\_stores.ikv\_online\_store package +===================================================== + +Submodules +---------- + +feast.infra.online\_stores.ikv\_online\_store.ikv module +-------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.ikv_online_store.ikv + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: feast.infra.online_stores.ikv_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.milvus_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.milvus_online_store.rst new file mode 100644 index 00000000000..5ae3015bf37 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.milvus_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.milvus\_online\_store package +======================================================== + +Submodules +---------- + +feast.infra.online\_stores.milvus\_online\_store.milvus module +-------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.milvus_online_store.milvus + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.milvus\_online\_store.milvus\_repo\_configuration module +----------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.milvus_online_store.milvus_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.milvus_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.mysql_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.mysql_online_store.rst new file mode 100644 index 00000000000..b1a9ea4f802 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.mysql_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.mysql\_online\_store package +======================================================= + +Submodules +---------- + +feast.infra.online\_stores.mysql\_online\_store.mysql module +------------------------------------------------------------ + +.. 
automodule:: feast.infra.online_stores.mysql_online_store.mysql + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.mysql\_online\_store.mysql\_repo\_configuration module +--------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.mysql_online_store.mysql_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.mysql_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.postgres_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.postgres_online_store.rst new file mode 100644 index 00000000000..9dfd200a4e1 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.postgres_online_store.rst @@ -0,0 +1,37 @@ +feast.infra.online\_stores.postgres\_online\_store package +========================================================== + +Submodules +---------- + +feast.infra.online\_stores.postgres\_online\_store.pgvector\_repo\_configuration module +--------------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.postgres_online_store.pgvector_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.postgres\_online\_store.postgres module +------------------------------------------------------------------ + +.. automodule:: feast.infra.online_stores.postgres_online_store.postgres + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.postgres\_online\_store.postgres\_repo\_configuration module +--------------------------------------------------------------------------------------- + +.. 
automodule:: feast.infra.online_stores.postgres_online_store.postgres_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.postgres_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.qdrant_online_store.rst b/sdk/python/docs/source/feast.infra.online_stores.qdrant_online_store.rst new file mode 100644 index 00000000000..5c210d4124d --- /dev/null +++ b/sdk/python/docs/source/feast.infra.online_stores.qdrant_online_store.rst @@ -0,0 +1,29 @@ +feast.infra.online\_stores.qdrant\_online\_store package +======================================================== + +Submodules +---------- + +feast.infra.online\_stores.qdrant\_online\_store.qdrant module +-------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.qdrant_online_store.qdrant + :members: + :undoc-members: + :show-inheritance: + +feast.infra.online\_stores.qdrant\_online\_store.qdrant\_repo\_configuration module +----------------------------------------------------------------------------------- + +.. automodule:: feast.infra.online_stores.qdrant_online_store.qdrant_repo_configuration + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.online_stores.qdrant_online_store + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.online_stores.rst b/sdk/python/docs/source/feast.infra.online_stores.rst index ea714e45c5b..c07c7e0c279 100644 --- a/sdk/python/docs/source/feast.infra.online_stores.rst +++ b/sdk/python/docs/source/feast.infra.online_stores.rst @@ -7,7 +7,16 @@ Subpackages .. 
toctree:: :maxdepth: 4 - feast.infra.online_stores + feast.infra.online_stores.cassandra_online_store + feast.infra.online_stores.couchbase_online_store + feast.infra.online_stores.elasticsearch_online_store + feast.infra.online_stores.hazelcast_online_store + feast.infra.online_stores.hbase_online_store + feast.infra.online_stores.ikv_online_store + feast.infra.online_stores.milvus_online_store + feast.infra.online_stores.mysql_online_store + feast.infra.online_stores.postgres_online_store + feast.infra.online_stores.qdrant_online_store Submodules ---------- @@ -36,6 +45,14 @@ feast.infra.online\_stores.dynamodb module :undoc-members: :show-inheritance: +feast.infra.online\_stores.faiss\_online\_store module +------------------------------------------------------ + +.. automodule:: feast.infra.online_stores.faiss_online_store + :members: + :undoc-members: + :show-inheritance: + feast.infra.online\_stores.helpers module ----------------------------------------- diff --git a/sdk/python/docs/source/feast.infra.rst b/sdk/python/docs/source/feast.infra.rst index b0046a2719e..791a4ace832 100644 --- a/sdk/python/docs/source/feast.infra.rst +++ b/sdk/python/docs/source/feast.infra.rst @@ -51,6 +51,14 @@ feast.infra.provider module :undoc-members: :show-inheritance: +feast.infra.supported\_async\_methods module +-------------------------------------------- + +.. automodule:: feast.infra.supported_async_methods + :members: + :undoc-members: + :show-inheritance: + Module contents --------------- diff --git a/sdk/python/docs/source/feast.infra.utils.couchbase.rst b/sdk/python/docs/source/feast.infra.utils.couchbase.rst new file mode 100644 index 00000000000..d6d2025c428 --- /dev/null +++ b/sdk/python/docs/source/feast.infra.utils.couchbase.rst @@ -0,0 +1,21 @@ +feast.infra.utils.couchbase package +=================================== + +Submodules +---------- + +feast.infra.utils.couchbase.couchbase\_utils module +--------------------------------------------------- + +.. 
automodule:: feast.infra.utils.couchbase.couchbase_utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: feast.infra.utils.couchbase + :members: + :undoc-members: + :show-inheritance: diff --git a/sdk/python/docs/source/feast.infra.utils.rst b/sdk/python/docs/source/feast.infra.utils.rst index 083259bfaae..cfa82dc5fd2 100644 --- a/sdk/python/docs/source/feast.infra.utils.rst +++ b/sdk/python/docs/source/feast.infra.utils.rst @@ -7,6 +7,7 @@ Subpackages .. toctree:: :maxdepth: 4 + feast.infra.utils.couchbase feast.infra.utils.postgres feast.infra.utils.snowflake diff --git a/sdk/python/docs/source/feast.rst b/sdk/python/docs/source/feast.rst index ea34c3d8dd9..fdb91b2342d 100644 --- a/sdk/python/docs/source/feast.rst +++ b/sdk/python/docs/source/feast.rst @@ -332,6 +332,14 @@ feast.saved\_dataset module :undoc-members: :show-inheritance: +feast.ssl\_ca\_trust\_store\_setup module +----------------------------------------- + +.. automodule:: feast.ssl_ca_trust_store_setup + :members: + :undoc-members: + :show-inheritance: + feast.stream\_feature\_view module ---------------------------------- diff --git a/sdk/python/feast/batch_feature_view.py b/sdk/python/feast/batch_feature_view.py index af7a5e68fd6..c66af0db18e 100644 --- a/sdk/python/feast/batch_feature_view.py +++ b/sdk/python/feast/batch_feature_view.py @@ -1,6 +1,10 @@ +import functools import warnings from datetime import datetime, timedelta -from typing import Dict, List, Optional, Tuple +from types import FunctionType +from typing import Dict, List, Optional, Tuple, Union + +import dill from feast import flags_helper from feast.data_source import DataSource @@ -8,6 +12,8 @@ from feast.feature_view import FeatureView from feast.field import Field from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto +from feast.transformation.base import Transformation +from feast.transformation.mode import TransformationMode 
warnings.simplefilter("once", RuntimeWarning) @@ -42,6 +48,7 @@ class BatchFeatureView(FeatureView): """ name: str + mode: Union[TransformationMode, str] entities: List[str] ttl: Optional[timedelta] source: DataSource @@ -54,11 +61,15 @@ class BatchFeatureView(FeatureView): owner: str timestamp_field: str materialization_intervals: List[Tuple[datetime, datetime]] + udf: Optional[FunctionType] + udf_string: Optional[str] + feature_transformation: Transformation def __init__( self, *, name: str, + mode: Union[TransformationMode, str] = TransformationMode.PYTHON, source: DataSource, entities: Optional[List[Entity]] = None, ttl: Optional[timedelta] = None, @@ -67,6 +78,9 @@ def __init__( description: str = "", owner: str = "", schema: Optional[List[Field]] = None, + udf: Optional[FunctionType] = None, + udf_string: Optional[str] = "", + feature_transformation: Optional[Transformation] = None, ): if not flags_helper.is_test(): warnings.warn( @@ -84,6 +98,13 @@ def __init__( f"or CUSTOM_SOURCE, got {type(source).__name__}: {source.name} instead " ) + self.mode = mode + self.udf = udf + self.udf_string = udf_string + self.feature_transformation = ( + feature_transformation or self.get_feature_transformation() + ) + super().__init__( name=name, entities=entities, @@ -95,3 +116,79 @@ def __init__( schema=schema, source=source, ) + + def get_feature_transformation(self) -> Transformation: + if not self.udf: + raise ValueError( + "Either a UDF or a feature transformation must be provided for BatchFeatureView" + ) + if self.mode in ( + TransformationMode.PANDAS, + TransformationMode.PYTHON, + TransformationMode.SQL, + ) or self.mode in ("pandas", "python", "sql"): + return Transformation( + mode=self.mode, udf=self.udf, udf_string=self.udf_string or "" + ) + else: + raise ValueError( + f"Unsupported transformation mode: {self.mode} for StreamFeatureView" + ) + + +def batch_feature_view( + *, + name: Optional[str] = None, + mode: Union[TransformationMode, str] = 
TransformationMode.PYTHON, + entities: Optional[List[str]] = None, + ttl: Optional[timedelta] = None, + source: Optional[DataSource] = None, + tags: Optional[Dict[str, str]] = None, + online: bool = True, + description: str = "", + owner: str = "", + schema: Optional[List[Field]] = None, +): + """ + Args: + name: + entities: + ttl: + source: + tags: + online: + description: + owner: + schema: + + Returns: + + """ + + def mainify(obj): + # Needed to allow dill to properly serialize the udf. Otherwise, clients will need to have a file with the same + # name as the original file defining the sfv. + if obj.__module__ != "__main__": + obj.__module__ = "__main__" + + def decorator(user_function): + udf_string = dill.source.getsource(user_function) + mainify(user_function) + batch_feature_view_obj = BatchFeatureView( + name=name or user_function.__name__, + mode=mode, + entities=entities, + ttl=ttl, + source=source, + tags=tags, + online=online, + description=description, + owner=owner, + schema=schema, + udf=user_function, + udf_string=udf_string, + ) + functools.update_wrapper(wrapper=batch_feature_view_obj, wrapped=user_function) + return batch_feature_view_obj + + return decorator diff --git a/sdk/python/feast/cli.py b/sdk/python/feast/cli.py index 06db93d6803..890c79aa856 100644 --- a/sdk/python/feast/cli.py +++ b/sdk/python/feast/cli.py @@ -137,6 +137,24 @@ def version(): print(f'Feast SDK Version: "{importlib_version("feast")}"') +@cli.command() +@click.pass_context +def configuration(ctx: click.Context): + """ + Display Feast configuration + """ + repo = ctx.obj["CHDIR"] + fs_yaml_file = ctx.obj["FS_YAML_FILE"] + cli_check_repo(repo, fs_yaml_file) + repo_config = load_repo_config(repo, fs_yaml_file) + if repo_config: + config_dict = repo_config.model_dump(by_alias=True, exclude_unset=True) + config_dict.pop("repo_path", None) + print(yaml.dump(config_dict, default_flow_style=False, sort_keys=False)) + else: + print("No configuration found.") + + @cli.command() 
@click.option( "--host", @@ -865,6 +883,7 @@ def materialize_incremental_command(ctx: click.Context, end_ts: str, views: List "cassandra", "hazelcast", "ikv", + "couchbase", ], case_sensitive=False, ), @@ -981,7 +1000,6 @@ def serve_command( raise click.BadParameter( "Please pass --cert and --key args to start the feature server in TLS mode." ) - store = create_feature_store(ctx) store.serve( @@ -1114,16 +1132,40 @@ def serve_registry_command( default=DEFAULT_OFFLINE_SERVER_PORT, help="Specify a port for the server", ) +@click.option( + "--key", + "-k", + "tls_key_path", + type=click.STRING, + default="", + show_default=False, + help="path to TLS certificate private key. You need to pass --cert as well to start server in TLS mode", +) +@click.option( + "--cert", + "-c", + "tls_cert_path", + type=click.STRING, + default="", + show_default=False, + help="path to TLS certificate public key. You need to pass --key as well to start server in TLS mode", +) @click.pass_context def serve_offline_command( ctx: click.Context, host: str, port: int, + tls_key_path: str, + tls_cert_path: str, ): """Start a remote server locally on a given host, port.""" + if (tls_key_path and not tls_cert_path) or (not tls_key_path and tls_cert_path): + raise click.BadParameter( + "Please pass --cert and --key args to start the offline server in TLS mode." 
+ ) store = create_feature_store(ctx) - store.serve_offline(host, port) + store.serve_offline(host, port, tls_key_path, tls_cert_path) @cli.command("validate") diff --git a/sdk/python/feast/driver_test_data.py b/sdk/python/feast/driver_test_data.py index 23f1f124774..d96c9c6d387 100644 --- a/sdk/python/feast/driver_test_data.py +++ b/sdk/python/feast/driver_test_data.py @@ -2,10 +2,10 @@ import itertools from datetime import timedelta, timezone from enum import Enum +from zoneinfo import ZoneInfo import numpy as np import pandas as pd -from zoneinfo import ZoneInfo from feast.infra.offline_stores.offline_utils import ( DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL, diff --git a/sdk/python/feast/embedded_go/online_features_service.py b/sdk/python/feast/embedded_go/online_features_service.py index 867431fcf85..8dd7b5ba0a1 100644 --- a/sdk/python/feast/embedded_go/online_features_service.py +++ b/sdk/python/feast/embedded_go/online_features_service.py @@ -1,3 +1,4 @@ +import logging from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union @@ -36,6 +37,8 @@ MILLI_SECOND = 1000 * MICRO_SECOND SECOND = 1000 * MILLI_SECOND +logger = logging.getLogger(__name__) + class EmbeddedOnlineFeatureServer: def __init__( @@ -243,28 +246,32 @@ def transformation_callback( output_schema_ptr: int, full_feature_names: bool, ) -> int: - odfv = fs.get_on_demand_feature_view(on_demand_feature_view_name) + try: + odfv = fs.get_on_demand_feature_view(on_demand_feature_view_name) - input_record = pa.RecordBatch._import_from_c(input_arr_ptr, input_schema_ptr) + input_record = pa.RecordBatch._import_from_c(input_arr_ptr, input_schema_ptr) - # For some reason, the callback is called with `full_feature_names` as a 1 if True or 0 if false. This handles - # the typeguard requirement. - full_feature_names = bool(full_feature_names) + # For some reason, the callback is called with `full_feature_names` as a 1 if True or 0 if false. 
This handles + # the typeguard requirement. + full_feature_names = bool(full_feature_names) - if odfv.mode != "pandas": - raise Exception( - f"OnDemandFeatureView mode '{odfv.mode} not supported by EmbeddedOnlineFeatureServer." - ) + if odfv.mode != "pandas": + raise Exception( + f"OnDemandFeatureView mode '{odfv.mode} not supported by EmbeddedOnlineFeatureServer." + ) - output = odfv.get_transformed_features_df( # type: ignore - input_record.to_pandas(), full_feature_names=full_feature_names - ) - output_record = pa.RecordBatch.from_pandas(output) + output = odfv.get_transformed_features_df( # type: ignore + input_record.to_pandas(), full_feature_names=full_feature_names + ) + output_record = pa.RecordBatch.from_pandas(output) - output_record.schema._export_to_c(output_schema_ptr) - output_record._export_to_c(output_arr_ptr) + output_record.schema._export_to_c(output_schema_ptr) + output_record._export_to_c(output_arr_ptr) - return output_record.num_rows + return output_record.num_rows + except Exception as e: + logger.exception(f"transformation callback failed with exception: {e}", e) + return 0 def logging_callback( diff --git a/sdk/python/feast/entity.py b/sdk/python/feast/entity.py index 290e6307a42..7f4eadc6352 100644 --- a/sdk/python/feast/entity.py +++ b/sdk/python/feast/entity.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from datetime import datetime from typing import Dict, List, Optional @@ -79,6 +80,13 @@ def __init__( ValueError: Parameters are specified incorrectly. """ self.name = name + if value_type is None: + warnings.warn( + "Entity value_type will be mandatory in the next release. " + "Please specify a value_type for entity '%s'." 
% name, + DeprecationWarning, + stacklevel=2, + ) self.value_type = value_type or ValueType.UNKNOWN if join_keys and len(join_keys) > 1: @@ -165,13 +173,12 @@ def from_proto(cls, entity_proto: EntityProto): entity = cls( name=entity_proto.spec.name, join_keys=[entity_proto.spec.join_key], + value_type=ValueType(entity_proto.spec.value_type), description=entity_proto.spec.description, tags=dict(entity_proto.spec.tags), owner=entity_proto.spec.owner, ) - entity.value_type = ValueType(entity_proto.spec.value_type) - if entity_proto.meta.HasField("created_timestamp"): entity.created_timestamp = entity_proto.meta.created_timestamp.ToDatetime() if entity_proto.meta.HasField("last_updated_timestamp"): diff --git a/sdk/python/feast/errors.py b/sdk/python/feast/errors.py index 84a9f12ec4f..8c72422f44e 100644 --- a/sdk/python/feast/errors.py +++ b/sdk/python/feast/errors.py @@ -32,7 +32,7 @@ def __str__(self) -> str: def __repr__(self) -> str: if hasattr(self, "__overridden_message__"): - return f"{type(self).__name__}('{getattr(self,'__overridden_message__')}')" + return f"{type(self).__name__}('{getattr(self, '__overridden_message__')}')" return super().__repr__() def to_error_detail(self) -> str: diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py index 1f4918fe7a5..434efa7e44b 100644 --- a/sdk/python/feast/feature_server.py +++ b/sdk/python/feast/feature_server.py @@ -1,17 +1,29 @@ +import asyncio +import os import sys import threading import time import traceback from contextlib import asynccontextmanager +from importlib import resources as importlib_resources from typing import Any, Dict, List, Optional import pandas as pd import psutil from dateutil import parser -from fastapi import Depends, FastAPI, Request, Response, status +from fastapi import ( + Depends, + FastAPI, + Request, + Response, + WebSocket, + WebSocketDisconnect, + status, +) from fastapi.concurrency import run_in_threadpool from fastapi.logger import logger from 
fastapi.responses import JSONResponse +from fastapi.staticfiles import StaticFiles from google.protobuf.json_format import MessageToDict from prometheus_client import Gauge, start_http_server from pydantic import BaseModel @@ -74,12 +86,83 @@ class GetOnlineFeaturesRequest(BaseModel): feature_service: Optional[str] = None features: Optional[List[str]] = None full_feature_names: bool = False + query_embedding: Optional[List[float]] = None + query_string: Optional[str] = None + + +class ChatMessage(BaseModel): + role: str + content: str + + +class ChatRequest(BaseModel): + messages: List[ChatMessage] + + +def _get_features(request: GetOnlineFeaturesRequest, store: "feast.FeatureStore"): + if request.feature_service: + feature_service = store.get_feature_service( + request.feature_service, allow_cache=True + ) + assert_permissions( + resource=feature_service, actions=[AuthzedAction.READ_ONLINE] + ) + features = feature_service # type: ignore + else: + all_feature_views, all_on_demand_feature_views = ( + utils._get_feature_views_to_use( + store.registry, + store.project, + request.features, + allow_cache=True, + hide_dummy_entity=False, + ) + ) + for feature_view in all_feature_views: + assert_permissions( + resource=feature_view, actions=[AuthzedAction.READ_ONLINE] + ) + for od_feature_view in all_on_demand_feature_views: + assert_permissions( + resource=od_feature_view, actions=[AuthzedAction.READ_ONLINE] + ) + features = request.features # type: ignore + return features def get_app( store: "feast.FeatureStore", registry_ttl_sec: int = DEFAULT_FEATURE_SERVER_REGISTRY_TTL, ): + """ + Creates a FastAPI app that can be used to start a feature server. 
+ + Args: + store: The FeatureStore to use for serving features + registry_ttl_sec: The TTL in seconds for the registry cache + + Returns: + A FastAPI app + + Example: + ```python + from feast import FeatureStore + + store = FeatureStore(repo_path="feature_repo") + app = get_app(store) + ``` + + The app provides the following endpoints: + - `/get-online-features`: Get online features + - `/retrieve-online-documents`: Retrieve online documents + - `/push`: Push features to the feature store + - `/write-to-online-store`: Write to the online store + - `/health`: Health check + - `/materialize`: Materialize features + - `/materialize-incremental`: Materialize features incrementally + - `/chat`: Chat UI + - `/ws/chat`: WebSocket endpoint for chat + """ proto_json.patch() # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down registry_proto = None @@ -121,33 +204,7 @@ async def lifespan(app: FastAPI): ) async def get_online_features(request: GetOnlineFeaturesRequest) -> Dict[str, Any]: # Initialize parameters for FeatureStore.get_online_features(...) 
call - if request.feature_service: - feature_service = store.get_feature_service( - request.feature_service, allow_cache=True - ) - assert_permissions( - resource=feature_service, actions=[AuthzedAction.READ_ONLINE] - ) - features = feature_service # type: ignore - else: - all_feature_views, all_on_demand_feature_views = ( - utils._get_feature_views_to_use( - store.registry, - store.project, - request.features, - allow_cache=True, - hide_dummy_entity=False, - ) - ) - for feature_view in all_feature_views: - assert_permissions( - resource=feature_view, actions=[AuthzedAction.READ_ONLINE] - ) - for od_feature_view in all_on_demand_feature_views: - assert_permissions( - resource=od_feature_view, actions=[AuthzedAction.READ_ONLINE] - ) - features = request.features # type: ignore + features = await run_in_threadpool(_get_features, request, store) read_params = dict( features=features, @@ -163,9 +220,47 @@ async def get_online_features(request: GetOnlineFeaturesRequest) -> Dict[str, An ) # Convert the Protobuf object to JSON and return it - return MessageToDict( - response.proto, preserving_proto_field_name=True, float_precision=18 + response_dict = await run_in_threadpool( + MessageToDict, + response.proto, + preserving_proto_field_name=True, + float_precision=18, ) + return response_dict + + @app.post( + "/retrieve-online-documents", + dependencies=[Depends(inject_user_details)], + ) + async def retrieve_online_documents( + request: GetOnlineFeaturesRequest, + ) -> Dict[str, Any]: + logger.warn( + "This endpoint is in alpha and will be moved to /get-online-features when stable." + ) + # Initialize parameters for FeatureStore.retrieve_online_documents_v2(...) 
call + features = await run_in_threadpool(_get_features, request, store) + + read_params = dict( + features=features, + entity_rows=request.entities, + full_feature_names=request.full_feature_names, + query=request.query_embedding, + query_string=request.query_string, + ) + + response = await run_in_threadpool( + lambda: store.retrieve_online_documents_v2(**read_params) # type: ignore + ) + + # Convert the Protobuf object to JSON and return it + response_dict = await run_in_threadpool( + MessageToDict, + response.proto, + preserving_proto_field_name=True, + float_precision=18, + ) + return response_dict @app.post("/push", dependencies=[Depends(inject_user_details)]) async def push(request: PushFeaturesRequest) -> None: @@ -252,6 +347,21 @@ async def health(): else Response(status_code=status.HTTP_503_SERVICE_UNAVAILABLE) ) + @app.post("/chat") + async def chat(request: ChatRequest): + # Process the chat request + # For now, just return dummy text + return {"response": "This is a dummy response from the Feast feature server."} + + @app.get("/chat") + async def chat_ui(): + # Serve the chat UI + static_dir_ref = importlib_resources.files(__spec__.parent) / "static/chat" # type: ignore[name-defined, arg-type] + with importlib_resources.as_file(static_dir_ref) as static_dir: + with open(os.path.join(static_dir, "index.html")) as f: + content = f.read() + return Response(content=content, media_type="text/html") + @app.post("/materialize", dependencies=[Depends(inject_user_details)]) def materialize(request: MaterializeRequest) -> None: for feature_view in request.feature_views or []: @@ -292,6 +402,46 @@ async def rest_exception_handler(request: Request, exc: Exception): content=str(exc), ) + # Chat WebSocket connection manager + class ConnectionManager: + def __init__(self): + self.active_connections: List[WebSocket] = [] + + async def connect(self, websocket: WebSocket): + await websocket.accept() + self.active_connections.append(websocket) + + def disconnect(self, 
websocket: WebSocket): + self.active_connections.remove(websocket) + + async def send_message(self, message: str, websocket: WebSocket): + await websocket.send_text(message) + + manager = ConnectionManager() + + @app.websocket("/ws/chat") + async def websocket_endpoint(websocket: WebSocket): + await manager.connect(websocket) + try: + while True: + message = await websocket.receive_text() + # Process the received message (currently unused but kept for future implementation) + # For now, just return dummy text + response = f"You sent: '{message}'. This is a dummy response from the Feast feature server." + + # Stream the response word by word + words = response.split() + for word in words: + await manager.send_message(word + " ", websocket) + await asyncio.sleep(0.1) # Add a small delay between words + except WebSocketDisconnect: + manager.disconnect(websocket) + + # Mount static files + static_dir_ref = importlib_resources.files(__spec__.parent) / "static" # type: ignore[name-defined, arg-type] + with importlib_resources.as_file(static_dir_ref) as static_dir: + app.mount("/static", StaticFiles(directory=static_dir), name="static") + return app diff --git a/sdk/python/feast/feature_store.py b/sdk/python/feast/feature_store.py index e6cdf90b4a3..7073a20d1e0 100644 --- a/sdk/python/feast/feature_store.py +++ b/sdk/python/feast/feature_store.py @@ -62,6 +62,7 @@ from feast.feast_object import FeastObject from feast.feature_service import FeatureService from feast.feature_view import DUMMY_ENTITY, DUMMY_ENTITY_NAME, FeatureView +from feast.field import Field from feast.inference import ( update_data_sources_with_inferred_event_timestamp_col, update_feature_views_with_inferred_features_and_entities, @@ -86,9 +87,11 @@ from feast.repo_config import RepoConfig, load_repo_config from feast.repo_contents import RepoContents from feast.saved_dataset import SavedDataset, SavedDatasetStorage, ValidationReference +from feast.ssl_ca_trust_store_setup import 
configure_ca_trust_store_env_variables from feast.stream_feature_view import StreamFeatureView +from feast.transformation.pandas_transformation import PandasTransformation +from feast.transformation.python_transformation import PythonTransformation from feast.utils import _utc_now -from feast.version import get_version warnings.simplefilter("once", DeprecationWarning) @@ -130,6 +133,8 @@ def __init__( if fs_yaml_file is not None and config is not None: raise ValueError("You cannot specify both fs_yaml_file and config.") + configure_ca_trust_store_env_variables() + if repo_path: self.repo_path = Path(repo_path) else: @@ -171,10 +176,6 @@ def __init__( self._provider = get_provider(self.config) - def version(self) -> str: - """Returns the version of the current Feast SDK/CLI.""" - return get_version() - def __repr__(self) -> str: return ( f"FeatureStore(\n" @@ -867,8 +868,7 @@ def apply( views_to_update = [ ob for ob in objects - if - ( + if ( # BFVs are not handled separately from FVs right now. 
(isinstance(ob, FeatureView) or isinstance(ob, BatchFeatureView)) and not isinstance(ob, StreamFeatureView) @@ -1548,6 +1548,64 @@ def _get_feature_view_and_df_for_online_write( df = pd.DataFrame(df) except Exception as _: raise DataFrameSerializationError(df) + + # # Apply transformations if this is an OnDemandFeatureView with write_to_online_store=True + if ( + isinstance(feature_view, OnDemandFeatureView) + and feature_view.write_to_online_store + ): + if ( + feature_view.mode == "python" + and isinstance( + feature_view.feature_transformation, PythonTransformation + ) + and df is not None + ): + input_dict = ( + df.to_dict(orient="records")[0] + if feature_view.singleton + else df.to_dict(orient="list") + ) + transformed_data = feature_view.feature_transformation.udf(input_dict) + if feature_view.write_to_online_store: + entities = [ + self.get_entity(entity) + for entity in (feature_view.entities or []) + ] + join_keys = [entity.join_key for entity in entities if entity] + join_keys = [k for k in join_keys if k in input_dict.keys()] + transformed_df = pd.DataFrame(transformed_data) + input_df = pd.DataFrame(input_dict) + if input_df.shape[0] == transformed_df.shape[0]: + for k in input_dict: + if k not in transformed_data: + transformed_data[k] = input_dict[k] + transformed_df = pd.DataFrame(transformed_data) + else: + transformed_df = pd.merge( + transformed_df, + input_df, + how="left", + on=join_keys, + ) + else: + # overwrite any transformed features and update the dictionary + for k in input_dict: + if k not in transformed_data: + transformed_data[k] = input_dict[k] + df = pd.DataFrame(transformed_data) + elif feature_view.mode == "pandas" and isinstance( + feature_view.feature_transformation, PandasTransformation + ): + transformed_df = feature_view.feature_transformation.udf(df) + if df is not None: + for col in df.columns: + transformed_df[col] = df[col] + df = transformed_df + + else: + raise Exception("Unsupported OnDemandFeatureView mode") + return 
feature_view, df def write_to_online_store( @@ -1755,10 +1813,11 @@ async def get_online_features_async( def retrieve_online_documents( self, - feature: str, + feature: Optional[str], query: Union[str, List[float]], top_k: int, - distance_metric: Optional[str] = None, + features: Optional[List[str]] = None, + distance_metric: Optional[str] = "L2", ) -> OnlineResponse: """ Retrieves the top k closest document features. Note, embeddings are a subset of features. @@ -1767,6 +1826,7 @@ def retrieve_online_documents( feature: The list of document features that should be retrieved from the online document store. These features can be specified either as a list of string document feature references or as a feature service. String feature references must have format "feature_view:feature", e.g, "document_fv:document_embeddings". + features: The list of features that should be retrieved from the online store. query: The query to retrieve the closest document features for. top_k: The number of closest document features to retrieve. distance_metric: The distance metric to use for retrieval. @@ -1775,18 +1835,44 @@ def retrieve_online_documents( raise ValueError( "Using embedding functionality is not supported for document retrieval. Please embed the query before calling retrieve_online_documents." ) + feature_list: List[str] = ( + features + if features is not None + else ([feature] if feature is not None else []) + ) + ( available_feature_views, _, ) = utils._get_feature_views_to_use( registry=self._registry, project=self.project, - features=[feature], + features=feature_list, allow_cache=True, hide_dummy_entity=False, ) + if features: + feature_view_set = set() + for feature in features: + feature_view_name = feature.split(":")[0] + feature_view = self.get_feature_view(feature_view_name) + feature_view_set.add(feature_view.name) + if len(feature_view_set) > 1: + raise ValueError( + "Document retrieval only supports a single feature view." 
+ ) + requested_feature = None + requested_features = [ + f.split(":")[1] for f in features if isinstance(f, str) and ":" in f + ] + else: + requested_feature = ( + feature.split(":")[1] if isinstance(feature, str) else feature + ) + requested_features = [requested_feature] if requested_feature else [] + requested_feature_view_name = ( - feature.split(":")[0] if isinstance(feature, str) else feature + feature.split(":")[0] if feature else list(feature_view_set)[0] ) for feature_view in available_feature_views: if feature_view.name == requested_feature_view_name: @@ -1795,19 +1881,19 @@ def retrieve_online_documents( raise ValueError( f"Feature view {requested_feature_view} not found in the registry." ) - requested_feature = ( - feature.split(":")[1] if isinstance(feature, str) else feature - ) + + requested_feature_view = available_feature_views[0] + provider = self._get_provider() document_features = self._retrieve_from_online_store( provider, requested_feature_view, requested_feature, + requested_features, query, top_k, distance_metric, ) - # TODO currently not return the vector value since it is same as feature value, if embedding is supported, # the feature value can be raw text before embedded entity_key_vals = [feature[1] for feature in document_features] @@ -1824,6 +1910,7 @@ def retrieve_online_documents( document_feature_vals = [feature[4] for feature in document_features] document_feature_distance_vals = [feature[5] for feature in document_features] online_features_response = GetOnlineFeaturesResponse(results=[]) + requested_feature = requested_feature or requested_features[0] utils._populate_result_rows_from_columnar( online_features_response=online_features_response, data={ @@ -1834,11 +1921,81 @@ def retrieve_online_documents( ) return OnlineResponse(online_features_response) + def retrieve_online_documents_v2( + self, + features: List[str], + top_k: int, + query: Optional[List[float]] = None, + query_string: Optional[str] = None, + distance_metric: 
Optional[str] = "L2", + ) -> OnlineResponse: + """ + Retrieves the top k closest document features. Note, embeddings are a subset of features. + + Args: + features: The list of features that should be retrieved from the online document store. These features can be + specified either as a list of string document feature references or as a feature service. String feature + references must have format "feature_view:feature", e.g, "document_fv:document_embeddings". + query: The embeded query to retrieve the closest document features for (optional) + top_k: The number of closest document features to retrieve. + distance_metric: The distance metric to use for retrieval. + query_string: The query string to retrieve the closest document features using keyword search (bm25). + """ + assert query is not None or query_string is not None, ( + "Either query or query_string must be provided." + ) + + ( + available_feature_views, + available_odfv_views, + ) = utils._get_feature_views_to_use( + registry=self._registry, + project=self.project, + features=features, + allow_cache=True, + hide_dummy_entity=False, + ) + feature_view_set = set() + for feature in features: + feature_view_name = feature.split(":")[0] + if feature_view_name in [fv.name for fv in available_odfv_views]: + feature_view: Union[OnDemandFeatureView, FeatureView] = ( + self.get_on_demand_feature_view(feature_view_name) + ) + else: + feature_view = self.get_feature_view(feature_view_name) + feature_view_set.add(feature_view.name) + if len(feature_view_set) > 1: + raise ValueError("Document retrieval only supports a single feature view.") + requested_features = [ + f.split(":")[1] for f in features if isinstance(f, str) and ":" in f + ] + if len(available_feature_views) == 0: + available_feature_views.extend(available_odfv_views) # type: ignore[arg-type] + + requested_feature_view = available_feature_views[0] + if not requested_feature_view: + raise ValueError( + f"Feature view {requested_feature_view} not found in 
the registry." + ) + + provider = self._get_provider() + return self._retrieve_from_online_store_v2( + provider, + requested_feature_view, + requested_features, + query, + top_k, + distance_metric, + query_string, + ) + def _retrieve_from_online_store( self, provider: Provider, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], query: List[float], top_k: int, distance_metric: Optional[str], @@ -1850,10 +2007,15 @@ def _retrieve_from_online_store( """ Search and return document features from the online document store. """ + vector_field_metadata = _get_feature_view_vector_field_metadata(table) + if vector_field_metadata: + distance_metric = vector_field_metadata.vector_search_metric + documents = provider.retrieve_online_documents( config=self.config, table=table, requested_feature=requested_feature, + requested_features=requested_features, query=query, top_k=top_k, distance_metric=distance_metric, @@ -1862,7 +2024,7 @@ def _retrieve_from_online_store( read_row_protos = [] row_ts_proto = Timestamp() - for row_ts, entity_key, feature_val, vector_value, distance_val in documents: + for row_ts, entity_key, feature_val, vector_value, distance_val in documents: # type: ignore[misc] # Reset timestamp to default or update if row_ts is not None if row_ts is not None: row_ts_proto.FromDatetime(row_ts) @@ -1887,6 +2049,73 @@ def _retrieve_from_online_store( ) return read_row_protos + def _retrieve_from_online_store_v2( + self, + provider: Provider, + table: FeatureView, + requested_features: List[str], + query: Optional[List[float]], + top_k: int, + distance_metric: Optional[str], + query_string: Optional[str], + ) -> OnlineResponse: + """ + Search and return document features from the online document store. 
+ """ + vector_field_metadata = _get_feature_view_vector_field_metadata(table) + if vector_field_metadata: + distance_metric = vector_field_metadata.vector_search_metric + + documents = provider.retrieve_online_documents_v2( + config=self.config, + table=table, + requested_features=requested_features, + query=query, + top_k=top_k, + distance_metric=distance_metric, + query_string=query_string, + ) + + entity_key_dict: Dict[str, List[ValueProto]] = {} + datevals, entityvals, list_of_feature_dicts = [], [], [] + for row_ts, entity_key, feature_dict in documents: # type: ignore[misc] + datevals.append(row_ts) + entityvals.append(entity_key) + list_of_feature_dicts.append(feature_dict) + if entity_key: + for key, value in zip(entity_key.join_keys, entity_key.entity_values): + python_value = value + if key not in entity_key_dict: + entity_key_dict[key] = [] + entity_key_dict[key].append(python_value) + + table_entity_values, idxs, output_len = utils._get_unique_entities_from_values( + entity_key_dict, + ) + + features_to_request: List[str] = [] + if requested_features: + features_to_request = requested_features + ["distance"] + else: + features_to_request = ["distance"] + feature_data = utils._convert_rows_to_protobuf( + requested_features=features_to_request, + read_rows=list(zip(datevals, list_of_feature_dicts)), + ) + + online_features_response = GetOnlineFeaturesResponse(results=[]) + utils._populate_response_from_feature_data( + feature_data=feature_data, + indexes=idxs, + online_features_response=online_features_response, + full_feature_names=False, + requested_features=features_to_request, + table=table, + output_len=output_len, + ) + + return OnlineResponse(online_features_response) + def serve( self, host: str, @@ -1963,11 +2192,17 @@ def serve_registry( self, port=port, tls_key_path=tls_key_path, tls_cert_path=tls_cert_path ) - def serve_offline(self, host: str, port: int) -> None: + def serve_offline( + self, + host: str, + port: int, + tls_key_path: str = 
"", + tls_cert_path: str = "", + ) -> None: """Start offline server locally on a given port.""" from feast import offline_server - offline_server.start_server(self, host, port) + offline_server.start_server(self, host, port, tls_key_path, tls_cert_path) def serve_transformations(self, port: int) -> None: """Start the feature transformation server locally on a given port.""" @@ -1995,9 +2230,9 @@ def write_logged_features( if not isinstance(source, FeatureService): raise ValueError("Only feature service is currently supported as a source") - assert ( - source.logging_config is not None - ), "Feature service must be configured with logging config in order to use this functionality" + assert source.logging_config is not None, ( + "Feature service must be configured with logging config in order to use this functionality" + ) assert isinstance(logs, (pa.Table, Path)) @@ -2230,3 +2465,16 @@ def _validate_data_sources(data_sources: List[DataSource]): raise DataSourceRepeatNamesException(case_insensitive_ds_name) else: ds_names.add(case_insensitive_ds_name) + + +def _get_feature_view_vector_field_metadata( + feature_view: FeatureView, +) -> Optional[Field]: + vector_fields = [field for field in feature_view.schema if field.vector_index] + if len(vector_fields) > 1: + raise ValueError( + f"Feature view {feature_view.name} has multiple vector fields. Only one vector field per feature view is supported." 
+ ) + if not vector_fields: + return None + return vector_fields[0] diff --git a/sdk/python/feast/feature_view.py b/sdk/python/feast/feature_view.py index 4aeb9a9c1dc..49b74893451 100644 --- a/sdk/python/feast/feature_view.py +++ b/sdk/python/feast/feature_view.py @@ -48,6 +48,7 @@ DUMMY_ENTITY = Entity( name=DUMMY_ENTITY_NAME, join_keys=[DUMMY_ENTITY_ID], + value_type=ValueType.UNKNOWN, ) DUMMY_ENTITY_FIELD = Field( name=DUMMY_ENTITY_ID, @@ -191,6 +192,10 @@ def __init__( else: features.append(field) + assert len([f for f in features if f.vector_index]) < 2, ( + f"Only one vector feature is allowed per feature view. Please update {self.name}." + ) + # TODO(felixwang9817): Add more robust validation of features. cols = [field.name for field in schema] for col in cols: @@ -343,12 +348,11 @@ def to_proto(self) -> FeatureViewProto: if self.stream_source: stream_source_proto = self.stream_source.to_proto() stream_source_proto.data_source_class_type = f"{self.stream_source.__class__.__module__}.{self.stream_source.__class__.__name__}" - spec = FeatureViewSpecProto( name=self.name, entities=self.entities, entity_columns=[field.to_proto() for field in self.entity_columns], - features=[field.to_proto() for field in self.features], + features=[feature.to_proto() for feature in self.features], description=self.description, tags=self.tags, owner=self.owner, diff --git a/sdk/python/feast/field.py b/sdk/python/feast/field.py index a41dcf5d5e6..fda1fbffe54 100644 --- a/sdk/python/feast/field.py +++ b/sdk/python/feast/field.py @@ -32,12 +32,16 @@ class Field: dtype: The type of the field, such as string or float. description: A human-readable description. tags: User-defined metadata in dictionary form. + vector_index: If set to True the field will be indexed for vector similarity search. + vector_search_metric: The metric used for vector similarity search. 
""" name: str dtype: FeastType description: str tags: Dict[str, str] + vector_index: bool + vector_search_metric: Optional[str] def __init__( self, @@ -46,6 +50,8 @@ def __init__( dtype: FeastType, description: str = "", tags: Optional[Dict[str, str]] = None, + vector_index: bool = False, + vector_search_metric: Optional[str] = None, ): """ Creates a Field object. @@ -55,11 +61,15 @@ def __init__( dtype: The type of the field, such as string or float. description (optional): A human-readable description. tags (optional): User-defined metadata in dictionary form. + vector_index (optional): If set to True the field will be indexed for vector similarity search. + vector_search_metric (optional): The metric used for vector similarity search. """ self.name = name self.dtype = dtype self.description = description self.tags = tags or {} + self.vector_index = vector_index + self.vector_search_metric = vector_search_metric def __eq__(self, other): if type(self) != type(other): @@ -70,6 +80,8 @@ def __eq__(self, other): or self.dtype != other.dtype or self.description != other.description or self.tags != other.tags + # or self.vector_index != other.vector_index + # or self.vector_search_metric != other.vector_search_metric ): return False return True @@ -87,6 +99,8 @@ def __repr__(self): f" dtype={self.dtype!r},\n" f" description={self.description!r},\n" f" tags={self.tags!r}\n" + f" vector_index={self.vector_index!r}\n" + f" vector_search_metric={self.vector_search_metric!r}\n" f")" ) @@ -96,11 +110,14 @@ def __str__(self): def to_proto(self) -> FieldProto: """Converts a Field object to its protobuf representation.""" value_type = self.dtype.to_value_type() + vector_search_metric = self.vector_search_metric or "" return FieldProto( name=self.name, value_type=value_type.value, description=self.description, tags=self.tags, + vector_index=self.vector_index, + vector_search_metric=vector_search_metric, ) @classmethod @@ -112,11 +129,15 @@ def from_proto(cls, field_proto: 
FieldProto): field_proto: FieldProto protobuf object """ value_type = ValueType(field_proto.value_type) + vector_search_metric = getattr(field_proto, "vector_search_metric", "") + vector_index = getattr(field_proto, "vector_index", False) return cls( name=field_proto.name, dtype=from_value_type(value_type=value_type), tags=dict(field_proto.tags), description=field_proto.description, + vector_index=vector_index, + vector_search_metric=vector_search_metric, ) @classmethod diff --git a/sdk/python/feast/infra/feature_servers/local_process/config.py b/sdk/python/feast/infra/feature_servers/local_process/config.py index 3d97912e4bd..942927ec2e8 100644 --- a/sdk/python/feast/infra/feature_servers/local_process/config.py +++ b/sdk/python/feast/infra/feature_servers/local_process/config.py @@ -4,5 +4,8 @@ class LocalFeatureServerConfig(BaseFeatureServerConfig): + # Feature server type selector. type: Literal["local"] = "local" - """Feature server type selector.""" + + # The endpoint definition for transformation_service + transformation_service_endpoint: str = "localhost:6569" diff --git a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile index df8e1c94d77..f4096e8494d 100644 --- a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile +++ b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile @@ -1,22 +1,7 @@ -FROM debian:11-slim -RUN apt update && \ - apt install -y \ - jq \ - python3 \ - python3-pip \ - python3-dev \ - build-essential +FROM registry.access.redhat.com/ubi8/python-311:1 -RUN pip install pip --upgrade -RUN pip install "feast[aws,gcp,snowflake,redis,go,mysql,postgres,opentelemetry,grpcio]" +COPY requirements.txt requirements.txt +RUN pip install -r requirements.txt - -RUN apt update -RUN apt install -y -V ca-certificates lsb-release wget -RUN wget https://apache.jfrog.io/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 
'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -RUN apt install -y -V ./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -RUN apt update -RUN apt -y install libarrow-dev # modify permissions to support running with a random uid -RUN mkdir -m 775 /.cache -RUN chmod g+w $(python3 -c "import feast.ui as _; print(_.__path__)" | tr -d "[']")/build/projects-list.json +RUN chmod g+w $(python -c "import feast.ui as ui; print(ui.__path__)" | tr -d "[']")/build/projects-list.json diff --git a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev index ce01c9809ba..31ac4a6366a 100644 --- a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev +++ b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile.dev @@ -1,24 +1,34 @@ -FROM debian:11-slim +FROM registry.access.redhat.com/ubi8/python-311:1 -RUN apt update && \ - apt install -y \ - jq \ - python3 \ - python3-pip \ - python3-dev \ - build-essential +USER 0 +RUN npm install -g yarn yalc && rm -rf .npm +USER default -RUN pip install pip --upgrade -COPY . . 
+COPY --chown=default .git ${APP_ROOT}/src/.git +COPY --chown=default setup.py pyproject.toml README.md Makefile ${APP_ROOT}/src/ +COPY --chown=default protos ${APP_ROOT}/src/protos +COPY --chown=default ui ${APP_ROOT}/src/ui +COPY --chown=default sdk/python ${APP_ROOT}/src/sdk/python -RUN pip install "feast[aws,gcp,snowflake,redis,go,mysql,postgres,opentelemetry,grpcio]" +WORKDIR ${APP_ROOT}/src/ui +RUN npm install && \ + npm run build:lib-dev && \ + rm -rf node_modules && \ + npm cache clean --force + +WORKDIR ${APP_ROOT}/src/sdk/python/feast/ui +RUN yalc add @feast-dev/feast-ui && \ + git diff package.json && \ + yarn install && \ + npm run build --omit=dev && \ + rm -rf node_modules && \ + npm cache clean --force && \ + yarn cache clean --all + +WORKDIR ${APP_ROOT}/src +RUN pip install --no-cache-dir pip-tools && \ + make install-python-ci-dependencies && \ + pip uninstall -y pip-tools -RUN apt update -RUN apt install -y -V ca-certificates lsb-release wget -RUN wget https://apache.jfrog.io/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -RUN apt install -y -V ./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb -RUN apt update -RUN apt -y install libarrow-dev # modify permissions to support running with a random uid -RUN mkdir -m 775 /.cache -RUN chmod g+w $(python3 -c "import feast.ui as _; print(_.__path__)" | tr -d "[']")/build/projects-list.json +RUN chmod g+w $(python -c "import feast.ui as ui; print(ui.__path__)" | tr -d "[']")/build/projects-list.json diff --git a/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt b/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt new file mode 100644 index 00000000000..20789a976d7 --- /dev/null +++ b/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt @@ -0,0 +1,2 @@ +# keep VERSION on line #2, this is critical to release CI 
+feast[aws,gcp,snowflake,redis,go,mysql,postgres,opentelemetry,grpcio,k8s,duckdb,milvus] == 0.46.0 diff --git a/sdk/python/feast/infra/key_encoding_utils.py b/sdk/python/feast/infra/key_encoding_utils.py index 1f9ffeef140..ce2692a4955 100644 --- a/sdk/python/feast/infra/key_encoding_utils.py +++ b/sdk/python/feast/infra/key_encoding_utils.py @@ -1,5 +1,7 @@ import struct -from typing import List, Tuple +from typing import List, Tuple, Union + +from google.protobuf.internal.containers import RepeatedScalarFieldContainer from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto from feast.protos.feast.types.Value_pb2 import Value as ValueProto @@ -81,6 +83,8 @@ def serialize_entity_key( ) output: List[bytes] = [] + if entity_key_serialization_version > 2: + output.append(struct.pack(" 2: @@ -120,7 +124,11 @@ def deserialize_entity_key( offset = 0 keys = [] values = [] - while offset < len(serialized_entity_key): + + num_keys = struct.unpack_from(" bytes: + """serializes a list of floats into a compact "raw bytes" format""" + return struct.pack(f"{vector_length}f", *vector) + + +def deserialize_f32(byte_vector: bytes, vector_length: int) -> List[float]: + """deserializes a list of floats from a compact "raw bytes" format""" + num_floats = vector_length // 4 # 4 bytes per float + return list(struct.unpack(f"{num_floats}f", byte_vector)) diff --git a/sdk/python/feast/infra/materialization/kubernetes/k8s_materialization_engine.py b/sdk/python/feast/infra/materialization/kubernetes/k8s_materialization_engine.py index a0ccbcd768b..96064409459 100644 --- a/sdk/python/feast/infra/materialization/kubernetes/k8s_materialization_engine.py +++ b/sdk/python/feast/infra/materialization/kubernetes/k8s_materialization_engine.py @@ -278,7 +278,7 @@ def _print_pod_logs(self, job_id, feature_view, offset=0): label_selector=f"job-name={job_id}", ).items for i, pod in enumerate(pods_list): - logger.info(f"Logging output for {feature_view.name} pod {offset+i}") + 
logger.info(f"Logging output for {feature_view.name} pod {offset + i}") try: logger.info( self.v1.read_namespaced_pod_log(pod.metadata.name, self.namespace) diff --git a/sdk/python/feast/infra/materialization/snowflake_engine.py b/sdk/python/feast/infra/materialization/snowflake_engine.py index f4be9dd7da5..2b18515ae44 100644 --- a/sdk/python/feast/infra/materialization/snowflake_engine.py +++ b/sdk/python/feast/infra/materialization/snowflake_engine.py @@ -206,9 +206,9 @@ def __init__( online_store: OnlineStore, **kwargs, ): - assert ( - repo_config.offline_store.type == "snowflake.offline" - ), "To use SnowflakeMaterializationEngine, you must use Snowflake as an offline store." + assert repo_config.offline_store.type == "snowflake.offline", ( + "To use SnowflakeMaterializationEngine, you must use Snowflake as an offline store." + ) super().__init__( repo_config=repo_config, @@ -241,10 +241,11 @@ def _materialize_one( project: str, tqdm_builder: Callable[[int], tqdm], ): - assert ( - isinstance(feature_view, BatchFeatureView) - or isinstance(feature_view, FeatureView) - ), "Snowflake can only materialize FeatureView & BatchFeatureView feature view types." + assert isinstance(feature_view, BatchFeatureView) or isinstance( + feature_view, FeatureView + ), ( + "Snowflake can only materialize FeatureView & BatchFeatureView feature view types." 
+ ) entities = [] for entity_name in feature_view.entities: @@ -420,7 +421,7 @@ def generate_snowflake_materialization_query( {serial_func.upper()}({entity_names}, {entity_data}, {entity_types}) AS "entity_key", {features_str}, "{feature_view.batch_source.timestamp_field}" - {fv_created_str if fv_created_str else ''} + {fv_created_str if fv_created_str else ""} FROM ( {fv_latest_mapped_values_sql} ) @@ -460,7 +461,7 @@ def materialize_to_snowflake_online_store( "feature_name", "feature_value" AS "value", "{feature_view.batch_source.timestamp_field}" AS "event_ts" - {fv_created_str + ' AS "created_ts"' if fv_created_str else ''} + {fv_created_str + ' AS "created_ts"' if fv_created_str else ""} FROM ( {materialization_sql} ) @@ -472,16 +473,16 @@ def materialize_to_snowflake_online_store( online_table."feature_name" = latest_values."feature_name", online_table."value" = latest_values."value", online_table."event_ts" = latest_values."event_ts" - {',online_table."created_ts" = latest_values."created_ts"' if fv_created_str else ''} + {',online_table."created_ts" = latest_values."created_ts"' if fv_created_str else ""} WHEN NOT MATCHED THEN - INSERT ("entity_feature_key", "entity_key", "feature_name", "value", "event_ts" {', "created_ts"' if fv_created_str else ''}) + INSERT ("entity_feature_key", "entity_key", "feature_name", "value", "event_ts" {', "created_ts"' if fv_created_str else ""}) VALUES ( latest_values."entity_feature_key", latest_values."entity_key", latest_values."feature_name", latest_values."value", latest_values."event_ts" - {',latest_values."created_ts"' if fv_created_str else ''} + {',latest_values."created_ts"' if fv_created_str else ""} ) """ diff --git a/sdk/python/feast/infra/offline_stores/bigquery.py b/sdk/python/feast/infra/offline_stores/bigquery.py index 23f80d79ff2..f0516b594ee 100644 --- a/sdk/python/feast/infra/offline_stores/bigquery.py +++ b/sdk/python/feast/infra/offline_stores/bigquery.py @@ -901,7 +901,10 @@ def 
arrow_schema_to_bq_schema(arrow_schema: pyarrow.Schema) -> List[SchemaField] {{ featureview.created_timestamp_column ~ ' as created_timestamp,' if featureview.created_timestamp_column else '' }} {{ featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %} {% for feature in featureview.features %} - {{ feature }} as {% if full_feature_names %}{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}{% else %}{{ featureview.field_mapping.get(feature, feature) }}{% endif %}{% if loop.last %}{% else %}, {% endif %} + {{ feature | backticks }} as {% if full_feature_names %} + {{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}{% else %} + {{ featureview.field_mapping.get(feature, feature) | backticks }}{% endif %} + {% if loop.last %}{% else %}, {% endif %} {% endfor %} FROM {{ featureview.table_subquery }} WHERE {{ featureview.timestamp_field }} <= '{{ featureview.max_event_timestamp }}' @@ -995,14 +998,14 @@ def arrow_schema_to_bq_schema(arrow_schema: pyarrow.Schema) -> List[SchemaField] The entity_dataframe dataset being our source of truth here. 
*/ -SELECT {{ final_output_feature_names | join(', ')}} +SELECT {{ final_output_feature_names | backticks | join(', ')}} FROM entity_dataframe {% for featureview in featureviews %} LEFT JOIN ( SELECT {{featureview.name}}__entity_row_unique_id {% for feature in featureview.features %} - ,{% if full_feature_names %}{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}{% else %}{{ featureview.field_mapping.get(feature, feature) }}{% endif %} + ,{% if full_feature_names %}{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}{% else %}{{ featureview.field_mapping.get(feature, feature) | backticks }}{% endif %} {% endfor %} FROM {{ featureview.name }}__cleaned ) USING ({{featureview.name}}__entity_row_unique_id) diff --git a/sdk/python/feast/infra/offline_stores/contrib/athena_offline_store/athena.py b/sdk/python/feast/infra/offline_stores/contrib/athena_offline_store/athena.py index ea0d6386cba..f49bfddb81d 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/athena_offline_store/athena.py +++ b/sdk/python/feast/infra/offline_stores/contrib/athena_offline_store/athena.py @@ -110,8 +110,8 @@ def pull_latest_from_table_or_query( SELECT {field_string}, ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS _feast_row FROM {from_expression} - WHERE {timestamp_field} BETWEEN TIMESTAMP '{start_date.strftime('%Y-%m-%d %H:%M:%S')}' AND TIMESTAMP '{end_date.strftime('%Y-%m-%d %H:%M:%S')}' - {"AND "+date_partition_column+" >= '"+start_date.strftime('%Y-%m-%d')+"' AND "+date_partition_column+" <= '"+end_date.strftime('%Y-%m-%d')+"' " if date_partition_column != "" and date_partition_column is not None else ''} + WHERE {timestamp_field} BETWEEN TIMESTAMP '{start_date.strftime("%Y-%m-%d %H:%M:%S")}' AND TIMESTAMP '{end_date.strftime("%Y-%m-%d %H:%M:%S")}' + {"AND " + date_partition_column + " >= '" + start_date.strftime("%Y-%m-%d") + "' AND " + date_partition_column + " <= '" + 
end_date.strftime("%Y-%m-%d") + "' " if date_partition_column != "" and date_partition_column is not None else ""} ) WHERE _feast_row = 1 """ @@ -151,7 +151,7 @@ def pull_all_from_table_or_query( SELECT {field_string} FROM {from_expression} WHERE {timestamp_field} BETWEEN TIMESTAMP '{start_date.astimezone(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]}' AND TIMESTAMP '{end_date.astimezone(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]}' - {"AND "+date_partition_column+" >= '"+start_date.strftime('%Y-%m-%d')+"' AND "+date_partition_column+" <= '"+end_date.strftime('%Y-%m-%d')+"' " if date_partition_column != "" and date_partition_column is not None else ''} + {"AND " + date_partition_column + " >= '" + start_date.strftime("%Y-%m-%d") + "' AND " + date_partition_column + " <= '" + end_date.strftime("%Y-%m-%d") + "' " if date_partition_column != "" and date_partition_column is not None else ""} """ return AthenaRetrievalJob( diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_columnar_repo_configuration.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_columnar_repo_configuration.py new file mode 100644 index 00000000000..745a074a757 --- /dev/null +++ b/sdk/python/feast/infra/offline_stores/contrib/couchbase_columnar_repo_configuration.py @@ -0,0 +1,20 @@ +from feast.infra.offline_stores.contrib.couchbase_offline_store.tests.data_source import ( + CouchbaseColumnarDataSourceCreator, +) +from tests.integration.feature_repos.integration_test_repo_config import ( + IntegrationTestRepoConfig, +) +from tests.integration.feature_repos.repo_configuration import REDIS_CONFIG +from tests.integration.feature_repos.universal.online_store.redis import ( + RedisOnlineStoreCreator, +) + +FULL_REPO_CONFIGS = [ + IntegrationTestRepoConfig( + provider="aws", + offline_store_creator=CouchbaseColumnarDataSourceCreator, + ), +] + +AVAILABLE_OFFLINE_STORES = [("aws", CouchbaseColumnarDataSourceCreator)] +AVAILABLE_ONLINE_STORES = {"redis": 
(REDIS_CONFIG, RedisOnlineStoreCreator)} diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/__init__.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase.py new file mode 100644 index 00000000000..a90d6c2172b --- /dev/null +++ b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase.py @@ -0,0 +1,729 @@ +import contextlib +import warnings +from dataclasses import asdict +from datetime import datetime, timedelta +from typing import ( + Any, + Callable, + ContextManager, + Dict, + Iterator, + KeysView, + List, + Literal, + Optional, + Tuple, + Union, + cast, +) + +import numpy as np +import pandas as pd +import pyarrow as pa +from couchbase_columnar.cluster import Cluster +from couchbase_columnar.common.result import BlockingQueryResult +from couchbase_columnar.credential import Credential +from couchbase_columnar.options import ClusterOptions, QueryOptions, TimeoutOptions +from jinja2 import BaseLoader, Environment +from pydantic import StrictFloat, StrictStr + +from feast.data_source import DataSource +from feast.errors import InvalidEntityType, ZeroRowsQueryResult +from feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_VAL, FeatureView +from feast.infra.offline_stores.offline_store import ( + OfflineStore, + RetrievalJob, + RetrievalMetadata, +) +from feast.infra.registry.base_registry import BaseRegistry +from feast.infra.utils.couchbase.couchbase_utils import normalize_timestamp +from feast.on_demand_feature_view import OnDemandFeatureView +from feast.repo_config import FeastConfigBaseModel, RepoConfig +from feast.saved_dataset import SavedDatasetStorage + +from ... 
import offline_utils +from .couchbase_source import ( + CouchbaseColumnarSource, + SavedDatasetCouchbaseColumnarStorage, +) + +# Only prints out runtime warnings once. +warnings.simplefilter("once", RuntimeWarning) + + +class CouchbaseColumnarOfflineStoreConfig(FeastConfigBaseModel): + """Offline store config for Couchbase Columnar""" + + type: Literal["couchbase.offline"] = "couchbase.offline" + + connection_string: Optional[StrictStr] = None + user: Optional[StrictStr] = None + password: Optional[StrictStr] = None + timeout: StrictFloat = 120 + + +class CouchbaseColumnarOfflineStore(OfflineStore): + @staticmethod + def pull_latest_from_table_or_query( + config: RepoConfig, + data_source: DataSource, + join_key_columns: List[str], + feature_name_columns: List[str], + timestamp_field: str, + created_timestamp_column: Optional[str], + start_date: datetime, + end_date: datetime, + ) -> RetrievalJob: + """ + Fetch the latest rows for each join key. + """ + warnings.warn( + "This offline store is an experimental feature in alpha development. 
" + "Some functionality may still be unstable so functionality can change in the future.", + RuntimeWarning, + ) + assert isinstance(config.offline_store, CouchbaseColumnarOfflineStoreConfig) + assert isinstance(data_source, CouchbaseColumnarSource) + from_expression = data_source.get_table_query_string() + + partition_by_join_key_string = ", ".join(_append_alias(join_key_columns, "a")) + if partition_by_join_key_string != "": + partition_by_join_key_string = ( + "PARTITION BY " + partition_by_join_key_string + ) + timestamps = [timestamp_field] + if created_timestamp_column: + timestamps.append(created_timestamp_column) + timestamp_desc_string = " DESC, ".join(_append_alias(timestamps, "a")) + " DESC" + a_field_string = ", ".join( + _append_alias(join_key_columns + feature_name_columns + timestamps, "a") + ) + b_field_string = ", ".join( + _append_alias(join_key_columns + feature_name_columns + timestamps, "b") + ) + + start_date_normalized = normalize_timestamp(start_date) + end_date_normalized = normalize_timestamp(end_date) + + query = f""" + SELECT + {b_field_string} + {f", {repr(DUMMY_ENTITY_VAL)} AS {DUMMY_ENTITY_ID}" if not join_key_columns else ""} + FROM ( + SELECT {a_field_string}, + ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS _feast_row + FROM {from_expression} a + WHERE a.{timestamp_field} BETWEEN '{start_date_normalized}' AND '{end_date_normalized}' + ) b + WHERE _feast_row = 1 + """ + + return CouchbaseColumnarRetrievalJob( + query=query, + config=config, + full_feature_names=False, + on_demand_feature_views=None, + timestamp_field=timestamp_field, + ) + + @staticmethod + def get_historical_features( + config: RepoConfig, + feature_views: List[FeatureView], + feature_refs: List[str], + entity_df: Union[pd.DataFrame, str], + registry: BaseRegistry, + project: str, + full_feature_names: bool = False, + ) -> RetrievalJob: + """ + Retrieve historical features using point-in-time joins. 
+ """ + warnings.warn( + "This offline store is an experimental feature in alpha development. " + "Some functionality may still be unstable so functionality can change in the future.", + RuntimeWarning, + ) + assert isinstance(config.offline_store, CouchbaseColumnarOfflineStoreConfig) + for fv in feature_views: + assert isinstance(fv.batch_source, CouchbaseColumnarSource) + + entity_schema = _get_entity_schema(entity_df, config) + + entity_df_event_timestamp_col = ( + offline_utils.infer_event_timestamp_from_entity_df(entity_schema) + ) + + entity_df_event_timestamp_range = _get_entity_df_event_timestamp_range( + entity_df, entity_df_event_timestamp_col, config + ) + + @contextlib.contextmanager + def query_generator() -> Iterator[str]: + source = cast(CouchbaseColumnarSource, feature_views[0].batch_source) + database = source.database + scope = source.scope + + table_name = ( + f"{database}.{scope}.{offline_utils.get_temp_entity_table_name()}" + ) + + _upload_entity_df(config, entity_df, table_name) + + expected_join_keys = offline_utils.get_expected_join_keys( + project, feature_views, registry + ) + + offline_utils.assert_expected_columns_in_entity_df( + entity_schema, expected_join_keys, entity_df_event_timestamp_col + ) + + query_context = offline_utils.get_feature_view_query_context( + feature_refs, + feature_views, + registry, + project, + entity_df_event_timestamp_range, + ) + + query_context_dict = [asdict(context) for context in query_context] + + try: + query = build_point_in_time_query( + query_context_dict, + left_table_query_string=table_name, + entity_df_event_timestamp_col=entity_df_event_timestamp_col, + entity_df_columns=entity_schema.keys(), + query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN, + full_feature_names=full_feature_names, + ) + yield query + finally: + if table_name: + _execute_query( + config.offline_store, + f"DROP COLLECTION {table_name} IF EXISTS", + ) + + return CouchbaseColumnarRetrievalJob( + query=query_generator, + 
config=config, + full_feature_names=full_feature_names, + on_demand_feature_views=OnDemandFeatureView.get_requested_odfvs( + feature_refs, project, registry + ), + metadata=RetrievalMetadata( + features=feature_refs, + keys=list(entity_schema.keys() - {entity_df_event_timestamp_col}), + min_event_timestamp=entity_df_event_timestamp_range[0], + max_event_timestamp=entity_df_event_timestamp_range[1], + ), + timestamp_field=entity_df_event_timestamp_col, + ) + + @staticmethod + def pull_all_from_table_or_query( + config: RepoConfig, + data_source: DataSource, + join_key_columns: List[str], + feature_name_columns: List[str], + timestamp_field: str, + start_date: datetime, + end_date: datetime, + ) -> RetrievalJob: + """ + Fetch all rows from the specified table or query within the time range. + """ + warnings.warn( + "This offline store is an experimental feature in alpha development. " + "Some functionality may still be unstable so functionality can change in the future.", + RuntimeWarning, + ) + assert isinstance(config.offline_store, CouchbaseColumnarOfflineStoreConfig) + assert isinstance(data_source, CouchbaseColumnarSource) + from_expression = data_source.get_table_query_string() + + field_string = ", ".join( + join_key_columns + feature_name_columns + [timestamp_field] + ) + + start_date_normalized = normalize_timestamp(start_date) + end_date_normalized = normalize_timestamp(end_date) + + query = f""" + SELECT {field_string} + FROM {from_expression} + WHERE `{timestamp_field}` BETWEEN '{start_date_normalized}' AND '{end_date_normalized}' + """ + + return CouchbaseColumnarRetrievalJob( + query=query, + config=config, + full_feature_names=False, + on_demand_feature_views=None, + timestamp_field=timestamp_field, + ) + + +class CouchbaseColumnarRetrievalJob(RetrievalJob): + def __init__( + self, + query: Union[str, Callable[[], ContextManager[str]]], + config: RepoConfig, + full_feature_names: bool, + timestamp_field: str, + on_demand_feature_views: 
Optional[List[OnDemandFeatureView]] = None, + metadata: Optional[RetrievalMetadata] = None, + ): + if not isinstance(query, str): + self._query_generator = query + else: + + @contextlib.contextmanager + def query_generator() -> Iterator[str]: + assert isinstance(query, str) + yield query + + self._query_generator = query_generator + self._config = config + self._full_feature_names = full_feature_names + self._on_demand_feature_views = on_demand_feature_views or [] + self._metadata = metadata + self._timestamp_field = timestamp_field + + @property + def full_feature_names(self) -> bool: + return self._full_feature_names + + @property + def on_demand_feature_views(self) -> List[OnDemandFeatureView]: + return self._on_demand_feature_views + + def _to_df_internal(self, timeout: Optional[int] = None) -> pd.DataFrame: + # Use PyArrow to convert the result to a pandas DataFrame + return self._to_arrow_internal(timeout).to_pandas() + + def to_sql(self) -> str: + with self._query_generator() as query: + return query + + def _to_arrow_internal(self, timeout: Optional[int] = None) -> pa.Table: + with self._query_generator() as query: + res = _execute_query(self._config.offline_store, query) + rows = res.get_all_rows() + + processed_rows = [] + for row in rows: + processed_row = {} + for key, value in row.items(): + if key == self._timestamp_field and value is not None: + # Parse and ensure timezone-aware datetime + processed_row[key] = pd.to_datetime(value, utc=True) + else: + processed_row[key] = np.nan if value is None else value + processed_rows.append(processed_row) + + # Convert to PyArrow table + table = pa.Table.from_pylist(processed_rows) + return table + + @property + def metadata(self) -> Optional[RetrievalMetadata]: + return self._metadata + + def persist( + self, + storage: SavedDatasetStorage, + allow_overwrite: Optional[bool] = False, + timeout: Optional[int] = None, + ): + assert isinstance(storage, SavedDatasetCouchbaseColumnarStorage) + table_name = 
f"{storage.couchbase_options._database}.{storage.couchbase_options._scope}.{offline_utils.get_temp_entity_table_name()}" + df_to_columnar(self.to_df(), table_name, self._config.offline_store) + + +def _get_columnar_cluster(config: CouchbaseColumnarOfflineStoreConfig) -> Cluster: + assert config.connection_string is not None + assert config.user is not None + assert config.password is not None + + cred = Credential.from_username_and_password(config.user, config.password) + timeout_opts = TimeoutOptions(dispatch_timeout=timedelta(seconds=120)) + return Cluster.create_instance( + config.connection_string, cred, ClusterOptions(timeout_options=timeout_opts) + ) + + +def _execute_query( + config: CouchbaseColumnarOfflineStoreConfig, + query: str, + named_params: Optional[Dict[str, Any]] = None, +) -> BlockingQueryResult: + cluster = _get_columnar_cluster(config) + return cluster.execute_query( + query, + QueryOptions( + named_parameters=named_params, timeout=timedelta(seconds=config.timeout) + ), + ) + + +def df_to_columnar( + df: pd.DataFrame, + table_name: str, + offline_store: CouchbaseColumnarOfflineStoreConfig, +): + df_copy = df.copy() + insert_values = df_copy.apply( + lambda row: { + col: ( + normalize_timestamp(row[col], "%Y-%m-%dT%H:%M:%S.%f+00:00") + if isinstance(row[col], pd.Timestamp) + else row[col] + ) + for col in df_copy.columns + }, + axis=1, + ).tolist() + + create_collection_query = f"CREATE COLLECTION {table_name} IF NOT EXISTS PRIMARY KEY(pk: UUID) AUTOGENERATED;" + insert_query = f"INSERT INTO {table_name} ({insert_values});" + + _execute_query(offline_store, create_collection_query) + _execute_query(offline_store, insert_query) + + +def _upload_entity_df( + config: RepoConfig, entity_df: Union[pd.DataFrame, str], table_name: str +): + if isinstance(entity_df, pd.DataFrame): + df_to_columnar(entity_df, table_name, config.offline_store) + elif isinstance(entity_df, str): + # If the entity_df is a string (SQL query), create a Columnar collection out 
of it + create_collection_query = f""" + CREATE COLLECTION {table_name} IF NOT EXISTS + PRIMARY KEY(pk: UUID) AUTOGENERATED + AS {entity_df} + """ + _execute_query(config.offline_store, create_collection_query) + else: + raise InvalidEntityType(type(entity_df)) + + +def _get_entity_df_event_timestamp_range( + entity_df: Union[pd.DataFrame, str], + entity_df_event_timestamp_col: str, + config: RepoConfig, +) -> Tuple[datetime, datetime]: + if isinstance(entity_df, pd.DataFrame): + entity_df_event_timestamp = entity_df.loc[ + :, entity_df_event_timestamp_col + ].infer_objects() + if pd.api.types.is_string_dtype(entity_df_event_timestamp): + entity_df_event_timestamp = pd.to_datetime( + entity_df_event_timestamp, utc=True + ) + entity_df_event_timestamp_range = ( + entity_df_event_timestamp.min().to_pydatetime(), + entity_df_event_timestamp.max().to_pydatetime(), + ) + + elif isinstance(entity_df, str): + query = f""" + SELECT + MIN({entity_df_event_timestamp_col}) AS min, + MAX({entity_df_event_timestamp_col}) AS max + FROM ({entity_df}) AS tmp_alias + """ + + res = _execute_query(config.offline_store, query) + rows = res.get_all_rows() + + if not rows: + raise ZeroRowsQueryResult(query) + + # Convert the string timestamps to datetime objects + min_ts = pd.to_datetime(rows[0]["min"], utc=True).to_pydatetime() + max_ts = pd.to_datetime(rows[0]["max"], utc=True).to_pydatetime() + entity_df_event_timestamp_range = (min_ts, max_ts) + else: + raise InvalidEntityType(type(entity_df)) + return entity_df_event_timestamp_range + + +def _escape_column(column: str) -> str: + """Wrap column names in backticks to handle reserved words.""" + return f"`{column}`" + + +def _append_alias(field_names: List[str], alias: str) -> List[str]: + """Append alias to escaped column names.""" + return [f"{alias}.{_escape_column(field_name)}" for field_name in field_names] + + +def build_point_in_time_query( + feature_view_query_contexts: List[dict], + left_table_query_string: str, + 
entity_df_event_timestamp_col: str, + entity_df_columns: KeysView[str], + query_template: str, + full_feature_names: bool = False, +) -> str: + """Build point-in-time query between each feature view table and the entity dataframe for Couchbase Columnar""" + template = Environment(loader=BaseLoader()).from_string(source=query_template) + final_output_feature_names = list(entity_df_columns) + final_output_feature_names.extend( + [ + ( + f"{fv['name']}__{fv['field_mapping'].get(feature, feature)}" + if full_feature_names + else fv["field_mapping"].get(feature, feature) + ) + for fv in feature_view_query_contexts + for feature in fv["features"] + ] + ) + + # Add additional fields to dict + template_context = { + "left_table_query_string": left_table_query_string, + "entity_df_event_timestamp_col": entity_df_event_timestamp_col, + "unique_entity_keys": set( + [entity for fv in feature_view_query_contexts for entity in fv["entities"]] + ), + "featureviews": feature_view_query_contexts, + "full_feature_names": full_feature_names, + "final_output_feature_names": final_output_feature_names, + } + + query = template.render(template_context) + return query + + +def get_couchbase_query_schema(config, entity_df: str) -> Dict[str, np.dtype]: + df_query = f"({entity_df}) AS sub" + res = _execute_query(config.offline_store, f"SELECT sub.* FROM {df_query} LIMIT 1") + rows = res.get_all_rows() + + if rows and len(rows) > 0: + # Get the first row + first_row = rows[0] + # Create dictionary mapping each column to dtype('O') + return {key: np.dtype("O") for key in first_row.keys()} + + return {} + + +def _get_entity_schema( + entity_df: Union[pd.DataFrame, str], + config: RepoConfig, +) -> Dict[str, np.dtype]: + if isinstance(entity_df, pd.DataFrame): + return dict(zip(entity_df.columns, entity_df.dtypes)) + + elif isinstance(entity_df, str): + return get_couchbase_query_schema(config, entity_df) + else: + raise InvalidEntityType(type(entity_df)) + + 
+MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """ +WITH entity_dataframe AS ( + SELECT e.*, + e.`{{entity_df_event_timestamp_col}}` AS entity_timestamp + {% for featureview in featureviews -%} + {% if featureview.entities -%} + ,CONCAT( + {% for entity in featureview.entities -%} + TOSTRING(e.`{{entity}}`), + {% endfor -%} + TOSTRING(e.`{{entity_df_event_timestamp_col}}`) + ) AS `{{featureview.name}}__entity_row_unique_id` + {% else -%} + ,TOSTRING(e.`{{entity_df_event_timestamp_col}}`) AS `{{featureview.name}}__entity_row_unique_id` + {% endif -%} + {% endfor %} + FROM {{ left_table_query_string }} e +), + +{% for featureview in featureviews %} + +`{{ featureview.name }}__entity_dataframe` AS ( + SELECT + {% if featureview.entities %}`{{ featureview.entities | join('`, `') }}`,{% endif %} + entity_timestamp, + `{{featureview.name}}__entity_row_unique_id` + FROM entity_dataframe + GROUP BY + {% if featureview.entities %}`{{ featureview.entities | join('`, `')}}`,{% endif %} + entity_timestamp, + `{{featureview.name}}__entity_row_unique_id` +), + +/* + This query template performs the point-in-time correctness join for a single feature set table + to the provided entity table. + + 1. We first join the current feature_view to the entity dataframe that has been passed. + This JOIN has the following logic: + - For each row of the entity dataframe, only keep the rows where the `timestamp_field` + is less than the one provided in the entity dataframe + - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field` + is higher the the one provided minus the TTL + - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been + computed previously + + The output of this CTE will contain all the necessary information and already filtered out most + of the data that is not relevant. 
+*/ +`{{ featureview.name }}__subquery` AS ( + LET max_ts = (SELECT RAW MAX(entity_timestamp) FROM entity_dataframe)[0] + SELECT s.* FROM ( + LET min_ts = (SELECT RAW MIN(entity_timestamp) FROM entity_dataframe)[0] + SELECT + `{{ featureview.timestamp_field }}` as event_timestamp, + {{ '`' ~ featureview.created_timestamp_column ~ '` as created_timestamp,' if featureview.created_timestamp_column else '' }} + {{ featureview.entity_selections | join(', ')}}{% if featureview.entity_selections %},{% else %}{% endif %} + {% for feature in featureview.features -%} + `{{ feature }}` as {% if full_feature_names %}`{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}`{% else %}`{{ featureview.field_mapping.get(feature, feature) }}`{% endif %}{% if not loop.last %}, {% endif %} + {%- endfor %} + FROM {{ featureview.table_subquery }} AS sub + WHERE `{{ featureview.timestamp_field }}` <= max_ts + {% if featureview.ttl == 0 %}{% else %} + AND date_diff_str(min_ts, `{{ featureview.timestamp_field }}`, "second") <= {{ featureview.ttl }} + {% endif %} + ) s +), + +`{{ featureview.name }}__base` AS ( + SELECT + subquery.*, + entity_dataframe.entity_timestamp, + entity_dataframe.`{{featureview.name}}__entity_row_unique_id` + FROM `{{ featureview.name }}__subquery` AS subquery + INNER JOIN `{{ featureview.name }}__entity_dataframe` AS entity_dataframe + ON TRUE + AND subquery.event_timestamp <= entity_dataframe.entity_timestamp + {% if featureview.ttl == 0 %}{% else %} + AND date_diff_str(entity_dataframe.entity_timestamp, subquery.event_timestamp, "second") <= {{ featureview.ttl }} + {% endif %} + {% for entity in featureview.entities %} + AND subquery.`{{ entity }}` = entity_dataframe.`{{ entity }}` + {% endfor %} +), + +/* + 2. If the `created_timestamp_column` has been set, we need to + deduplicate the data first. This is done by calculating the + `MAX(created_at_timestamp)` for each event_timestamp. 
+ We then join the data on the next CTE +*/ +{% if featureview.created_timestamp_column %} +`{{ featureview.name }}__dedup` AS ( + SELECT + `{{featureview.name}}__entity_row_unique_id`, + event_timestamp, + MAX(created_timestamp) AS created_timestamp + FROM `{{ featureview.name }}__base` + GROUP BY `{{featureview.name}}__entity_row_unique_id`, event_timestamp +), +{% endif %} + +/* + 3. The data has been filtered during the first CTE "*__base" + Thus we only need to compute the latest timestamp of each feature. +*/ +`{{ featureview.name }}__latest` AS ( + SELECT + event_timestamp + {% if featureview.created_timestamp_column %},created_timestamp{% endif %}, + `{{featureview.name}}__entity_row_unique_id` + FROM ( + SELECT base.*, + ROW_NUMBER() OVER( + PARTITION BY base.`{{featureview.name}}__entity_row_unique_id` + ORDER BY base.event_timestamp DESC + {% if featureview.created_timestamp_column %}, base.created_timestamp DESC{% endif %} + ) AS row_number + FROM `{{ featureview.name }}__base` base + {% if featureview.created_timestamp_column %} + INNER JOIN `{{ featureview.name }}__dedup` dedup + ON base.`{{featureview.name}}__entity_row_unique_id` = dedup.`{{featureview.name}}__entity_row_unique_id` + AND base.event_timestamp = dedup.event_timestamp + AND base.created_timestamp = dedup.created_timestamp + {% endif %} + ) AS sub + WHERE sub.row_number = 1 +), + +/* + 4. 
Once we know the latest value of each feature for a given timestamp, + we can join again the data back to the original "base" dataset +*/ +`{{ featureview.name }}__cleaned` AS ( + SELECT base.* + FROM `{{ featureview.name }}__base` AS base + INNER JOIN `{{ featureview.name }}__latest` AS latest + ON base.`{{featureview.name}}__entity_row_unique_id` = latest.`{{featureview.name}}__entity_row_unique_id` + AND base.event_timestamp = latest.event_timestamp + {% if featureview.created_timestamp_column %} + AND base.created_timestamp = latest.created_timestamp + {% endif %} +){% if not loop.last %},{% endif %} + +{% endfor %} + +/* + Joins the outputs of multiple time travel joins to a single table. + The entity_dataframe dataset being our source of truth here. + */ +SELECT DISTINCT + {%- set fields = [] %} + {%- for feature_name in final_output_feature_names %} + {%- if '__' not in feature_name %} + {%- set ns = namespace(found=false) %} + {%- for fv in featureviews %} + {%- for feature in fv.features %} + {%- if feature == feature_name %} + {%- set ns.found = true %} + {%- if full_feature_names %} + {%- set _ = fields.append('IFMISSINGORNULL(`' ~ fv.name ~ '_final`.`' ~ fv.name ~ '__' ~ feature ~ '`, null) AS `' ~ fv.name ~ '__' ~ feature ~ '`') %} + {%- else %} + {%- set _ = fields.append('IFMISSINGORNULL(`' ~ fv.name ~ '_final`.`' ~ feature ~ '`, null) AS `' ~ feature ~ '`') %} + {%- endif %} + {%- endif %} + {%- endfor %} + {%- endfor %} + {%- if not ns.found %} + {%- if feature_name == 'feature_name' %} + {%- set _ = fields.append('IFMISSINGORNULL(`field_mapping_final`.`' ~ feature_name ~ '`, null) AS `' ~ feature_name ~ '`') %} + {%- else %} + {%- set _ = fields.append('main_entity.`' ~ feature_name ~ '`') %} + {%- endif %} + {%- endif %} + {%- else %} + {%- set feature_parts = feature_name.split('__') %} + {%- set fv_name = feature_parts[0] %} + {%- set feature = feature_parts[1] %} + {%- if feature_name == 'field_mapping__feature_name' %} + {%- set _ = 
fields.append('IFMISSINGORNULL(`field_mapping_final`.`field_mapping__feature_name`, null) AS `field_mapping__feature_name`') %} + {%- else %} + {%- set _ = fields.append('IFMISSINGORNULL(`' ~ fv_name ~ '_final`.`' ~ feature_name ~ '`, null) AS `' ~ feature_name ~ '`') %} + {%- endif %} + {%- endif %} + {%- endfor %} + {{ fields | reject('none') | join(',\n ') }} +FROM entity_dataframe AS main_entity + +{%- for featureview in featureviews %} +LEFT JOIN ( + SELECT + `{{featureview.name}}__entity_row_unique_id`, + {% for feature in featureview.features -%} + IFMISSINGORNULL(`{% if full_feature_names %}{{ featureview.name }}__{{ featureview.field_mapping.get(feature, feature) }}{% else %}{{ featureview.field_mapping.get(feature, feature) }}{% endif %}`, null) AS `{% if full_feature_names %}{{ featureview.name }}__{{ featureview.field_mapping.get(feature, feature) }}{% else %}{{ featureview.field_mapping.get(feature, feature) }}{% endif %}`{% if not loop.last %},{% endif %} + {% endfor %} + FROM `{{ featureview.name }}__cleaned` +) AS `{{featureview.name}}_final` +ON main_entity.`{{featureview.name}}__entity_row_unique_id` = `{{featureview.name}}_final`.`{{featureview.name}}__entity_row_unique_id` +{% endfor %} +""" diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase_source.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase_source.py new file mode 100644 index 00000000000..89e4aa2332e --- /dev/null +++ b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/couchbase_source.py @@ -0,0 +1,406 @@ +import json +from datetime import timedelta +from typing import Any, Callable, Dict, Iterable, Optional, Tuple + +from couchbase_columnar.cluster import Cluster +from couchbase_columnar.credential import Credential +from couchbase_columnar.options import ClusterOptions, QueryOptions, TimeoutOptions +from typeguard import typechecked + +from feast.data_source import DataSource +from 
feast.errors import DataSourceNoNameException, ZeroColumnQueryResult +from feast.feature_logging import LoggingDestination +from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto +from feast.protos.feast.core.FeatureService_pb2 import ( + LoggingConfig as LoggingConfigProto, +) +from feast.protos.feast.core.SavedDataset_pb2 import ( + SavedDatasetStorage as SavedDatasetStorageProto, +) +from feast.repo_config import RepoConfig +from feast.saved_dataset import SavedDatasetStorage +from feast.type_map import ValueType, cb_columnar_type_to_feast_value_type + + +@typechecked +class CouchbaseColumnarSource(DataSource): + """A CouchbaseColumnarSource object defines a data source that a CouchbaseColumnarOfflineStore class can use.""" + + def __init__( + self, + name: Optional[str] = None, + query: Optional[str] = None, + database: Optional[str] = "Default", + scope: Optional[str] = "Default", + collection: Optional[str] = None, + timestamp_field: Optional[str] = "", + created_timestamp_column: Optional[str] = "", + field_mapping: Optional[Dict[str, str]] = None, + description: Optional[str] = "", + tags: Optional[Dict[str, str]] = None, + owner: Optional[str] = "", + ): + """Creates a CouchbaseColumnarSource object. + + Args: + name: Name of CouchbaseColumnarSource, which should be unique within a project. + query: SQL++ query that will be used to fetch the data. + database: Columnar database name. + scope: Columnar scope name. + collection: Columnar collection name. + timestamp_field (optional): Event timestamp field used for point-in-time joins of + feature values. + created_timestamp_column (optional): Timestamp column indicating when the row + was created, used for deduplicating rows. + field_mapping (optional): A dictionary mapping of field names in this data + source to feature names in a feature table or view. Only used for feature + fields, not entity or timestamp fields. + description (optional): A human-readable description. 
+ tags (optional): A dictionary of key-value pairs to store arbitrary metadata. + owner (optional): The owner of the data source, typically the email of the primary + maintainer. + """ + self._couchbase_options = CouchbaseColumnarOptions( + name=name, + query=query, + database=database, + scope=scope, + collection=collection, + ) + + # If no name, use the collection as the default name. + if name is None and collection is None: + raise DataSourceNoNameException() + name = name or collection + assert name + + super().__init__( + name=name, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + field_mapping=field_mapping, + description=description, + tags=tags, + owner=owner, + ) + + def __hash__(self): + return super().__hash__() + + def __eq__(self, other): + if not isinstance(other, CouchbaseColumnarSource): + raise TypeError( + "Comparisons should only involve CouchbaseColumnarSource class objects." + ) + + return ( + super().__eq__(other) + and self._couchbase_options._query == other._couchbase_options._query + and self.timestamp_field == other.timestamp_field + and self.created_timestamp_column == other.created_timestamp_column + and self.field_mapping == other.field_mapping + ) + + @staticmethod + def from_proto(data_source: DataSourceProto): + assert data_source.HasField("custom_options") + + couchbase_options = json.loads(data_source.custom_options.configuration) + + return CouchbaseColumnarSource( + name=couchbase_options["name"], + query=couchbase_options["query"], + database=couchbase_options["database"], + scope=couchbase_options["scope"], + collection=couchbase_options["collection"], + field_mapping=dict(data_source.field_mapping), + timestamp_field=data_source.timestamp_field, + created_timestamp_column=data_source.created_timestamp_column, + description=data_source.description, + tags=dict(data_source.tags), + owner=data_source.owner, + ) + + def to_proto(self) -> DataSourceProto: + data_source_proto = 
DataSourceProto( + name=self.name, + type=DataSourceProto.CUSTOM_SOURCE, + data_source_class_type="feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source.CouchbaseColumnarSource", + field_mapping=self.field_mapping, + custom_options=self._couchbase_options.to_proto(), + description=self.description, + tags=self.tags, + owner=self.owner, + ) + + data_source_proto.timestamp_field = self.timestamp_field + data_source_proto.created_timestamp_column = self.created_timestamp_column + + return data_source_proto + + def validate(self, config: RepoConfig): + pass + + @staticmethod + def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: + # Define the type conversion for Couchbase fields to Feast ValueType as needed + return cb_columnar_type_to_feast_value_type + + def _infer_composite_type(self, field: Dict[str, Any]) -> str: + """ + Infers type signature for a field, rejecting complex nested structures that + aren't compatible with Feast's type system. + + Args: + field: Dictionary containing field information including type and nested structures + + Returns: + String representation of the type, or raises ValueError for incompatible types + + Raises: + ValueError: If field contains complex nested structures not supported by Feast + """ + base_type = field.get("field-type", "unknown").lower() + + if base_type == "array": + if "list" not in field or not field["list"]: + return "array" + + item_type = field["list"][0] + if item_type.get("field-type") == "object": + raise ValueError( + "Complex object types in arrays are not supported by Feast. " + "Arrays must contain homogeneous primitive values." + ) + + # Only allow arrays of primitive types + inner_type = item_type.get("field-type", "unknown") + if inner_type in ["array", "multiset", "object"]: + raise ValueError( + "Nested collection types are not supported by Feast. " + "Arrays can only be one level deep." 
+ ) + + return f"array<{inner_type}>" + + elif base_type == "object": + raise ValueError( + "Complex object types are not supported by Feast. " + "Only primitive types and homogeneous arrays are allowed." + ) + + elif base_type == "multiset": + raise ValueError( + "Multiset types are not supported by Feast. " + "Only primitive types and homogeneous arrays are allowed." + ) + + return base_type + + def get_table_column_names_and_types( + self, config: RepoConfig + ) -> Iterable[Tuple[str, str]]: + cred = Credential.from_username_and_password( + config.offline_store.user, config.offline_store.password + ) + timeout_opts = TimeoutOptions(dispatch_timeout=timedelta(seconds=120)) + cluster = Cluster.create_instance( + config.offline_store.connection_string, + cred, + ClusterOptions(timeout_options=timeout_opts), + ) + + query_context = self.get_table_query_string() + query = f""" + SELECT get_object_fields( + CASE WHEN ARRAY_LENGTH(OBJECT_PAIRS(t)) = 1 AND OBJECT_PAIRS(t)[0].`value` IS NOT MISSING + THEN OBJECT_PAIRS(t)[0].`value` + ELSE t + END + ) AS field_types + FROM {query_context} AS t + LIMIT 1; + """ + + result = cluster.execute_query( + query, QueryOptions(timeout=timedelta(seconds=config.offline_store.timeout)) + ) + if not result: + raise ZeroColumnQueryResult(query) + + rows = result.get_all_rows() + field_type_pairs = [] + if rows and rows[0]: + # Accessing the "field_types" array from the first row + field_types_list = rows[0].get("field_types", []) + for field in field_types_list: + field_name = field.get("field-name", "unknown") + field_type = field.get("field-type", "unknown") + # drop uuid fields to ensure schema matches dataframe + if field_type == "uuid": + continue + field_type = self._infer_composite_type(field) + field_type_pairs.append((field_name, field_type)) + return field_type_pairs + + def get_table_query_string(self) -> str: + if ( + self._couchbase_options._database + and self._couchbase_options._scope + and 
self._couchbase_options._collection + ): + return f"`{self._couchbase_options._database}`.`{self._couchbase_options._scope}`.`{self._couchbase_options._collection}`" + else: + return f"({self._couchbase_options._query})" + + @property + def database(self) -> str: + """Returns the database name.""" + return self._couchbase_options._database + + @property + def scope(self) -> str: + """Returns the scope name.""" + return self._couchbase_options._scope + + +class CouchbaseColumnarOptions: + def __init__( + self, + name: Optional[str], + query: Optional[str], + database: Optional[str], + scope: Optional[str], + collection: Optional[str], + ): + self._name = name or "" + self._query = query or "" + self._database = database or "" + self._scope = scope or "" + self._collection = collection or "" + + @classmethod + def from_proto(cls, couchbase_options_proto: DataSourceProto.CustomSourceOptions): + config = json.loads(couchbase_options_proto.configuration.decode("utf8")) + couchbase_options = cls( + name=config["name"], + query=config["query"], + database=config["database"], + scope=config["scope"], + collection=config["collection"], + ) + + return couchbase_options + + def to_proto(self) -> DataSourceProto.CustomSourceOptions: + couchbase_options_proto = DataSourceProto.CustomSourceOptions( + configuration=json.dumps( + { + "name": self._name, + "query": self._query, + "database": self._database, + "scope": self._scope, + "collection": self._collection, + } + ).encode() + ) + return couchbase_options_proto + + +class SavedDatasetCouchbaseColumnarStorage(SavedDatasetStorage): + _proto_attr_name = "custom_storage" + + couchbase_options: CouchbaseColumnarOptions + + def __init__(self, database_ref: str, scope_ref: str, collection_ref: str): + self.couchbase_options = CouchbaseColumnarOptions( + database=database_ref, + scope=scope_ref, + collection=collection_ref, + name=None, + query=None, + ) + + @staticmethod + def from_proto(storage_proto: SavedDatasetStorageProto) -> 
SavedDatasetStorage: + return SavedDatasetCouchbaseColumnarStorage( + database_ref=CouchbaseColumnarOptions.from_proto( + storage_proto.custom_storage + )._database, + scope_ref=CouchbaseColumnarOptions.from_proto( + storage_proto.custom_storage + )._scope, + collection_ref=CouchbaseColumnarOptions.from_proto( + storage_proto.custom_storage + )._collection, + ) + + def to_proto(self) -> SavedDatasetStorageProto: + return SavedDatasetStorageProto( + custom_storage=self.couchbase_options.to_proto() + ) + + def to_data_source(self) -> DataSource: + return CouchbaseColumnarSource( + database=self.couchbase_options._database, + scope=self.couchbase_options._scope, + collection=self.couchbase_options._collection, + ) + + +class CouchbaseColumnarLoggingDestination(LoggingDestination): + """ + Couchbase Columnar implementation of a logging destination. + """ + + database: str + scope: str + table_name: str + + _proto_kind = "couchbase_columnar_destination" + + def __init__(self, *, database: str, scope: str, table_name: str): + """ + Args: + database: The Couchbase database name + scope: The Couchbase scope name + table_name: The Couchbase collection name to log features into + """ + self.database = database + self.scope = scope + self.table_name = table_name + + def to_data_source(self) -> DataSource: + """ + Returns a data source object representing the logging destination. + """ + return CouchbaseColumnarSource( + database=self.database, + scope=self.scope, + collection=self.table_name, + ) + + def to_proto(self) -> LoggingConfigProto: + """ + Converts the logging destination to its protobuf representation. 
+ """ + return LoggingConfigProto( + couchbase_columnar_destination=LoggingConfigProto.CouchbaseColumnarDestination( + database=self.database, + scope=self.scope, + collection=self.table_name, + ) + ) + + @classmethod + def from_proto( + cls, config_proto: LoggingConfigProto + ) -> "CouchbaseColumnarLoggingDestination": + """ + Creates a CouchbaseColumnarLoggingDestination from its protobuf representation. + """ + return CouchbaseColumnarLoggingDestination( + database=config_proto.CouchbaseColumnarDestination.database, + scope=config_proto.CouchbaseColumnarDestination.scope, + table_name=config_proto.CouchbaseColumnarDestination.collection, + ) diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/tests/__init__.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/tests/data_source.py b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/tests/data_source.py new file mode 100644 index 00000000000..c23a8301a76 --- /dev/null +++ b/sdk/python/feast/infra/offline_stores/contrib/couchbase_offline_store/tests/data_source.py @@ -0,0 +1,213 @@ +import atexit +import json +import os +import signal +import threading +import uuid +from datetime import timedelta +from typing import Dict, List, Optional + +import pandas as pd +from couchbase_columnar.cluster import Cluster +from couchbase_columnar.credential import Credential +from couchbase_columnar.options import ClusterOptions, QueryOptions, TimeoutOptions + +from feast.data_source import DataSource +from feast.feature_logging import LoggingDestination +from feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase import ( + CouchbaseColumnarOfflineStoreConfig, +) +from feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source import ( + CouchbaseColumnarLoggingDestination, + 
CouchbaseColumnarSource, +) +from feast.infra.utils.couchbase.couchbase_utils import normalize_timestamp +from feast.repo_config import FeastConfigBaseModel +from tests.integration.feature_repos.universal.data_source_creator import ( + DataSourceCreator, +) + +COUCHBASE_COLUMNAR_DATABASE = "Default" +COUCHBASE_COLUMNAR_SCOPE = "Default" + + +class CouchbaseColumnarDataSourceCreator(DataSourceCreator): + _shutting_down = False + _cluster = None + _cluster_lock = threading.Lock() + + @classmethod + def get_cluster(cls): + with cls._cluster_lock: + if cls._cluster is None: + cred = Credential.from_username_and_password( + os.environ["COUCHBASE_COLUMNAR_USER"], + os.environ["COUCHBASE_COLUMNAR_PASSWORD"], + ) + timeout_opts = TimeoutOptions(dispatch_timeout=timedelta(seconds=120)) + cls._cluster = Cluster.create_instance( + os.environ["COUCHBASE_COLUMNAR_CONNECTION_STRING"], + cred, + ClusterOptions(timeout_options=timeout_opts), + ) + return cls._cluster + + def __init__(self, project_name: str, **kwargs): + super().__init__(project_name) + self.project_name = project_name + self.collections: List[str] = [] + + self.offline_store_config = CouchbaseColumnarOfflineStoreConfig( + type="couchbase.offline", + connection_string=os.environ["COUCHBASE_COLUMNAR_CONNECTION_STRING"], + user=os.environ["COUCHBASE_COLUMNAR_USER"], + password=os.environ["COUCHBASE_COLUMNAR_PASSWORD"], + timeout=120, + ) + + def create_data_source( + self, + df: pd.DataFrame, + destination_name: str, + created_timestamp_column="created_ts", + field_mapping: Optional[Dict[str, str]] = None, + timestamp_field: Optional[str] = "ts", + ) -> DataSource: + def format_row(row): + """Convert row to dictionary, handling NaN and timestamps""" + return { + col: ( + normalize_timestamp(row[col]) + if isinstance(row[col], pd.Timestamp) + else None + if pd.isna(row[col]) + else row[col] + ) + for col in row.index + } + + collection_name = self.get_prefixed_collection_name(destination_name) + + 
create_cluster_query = f"CREATE ANALYTICS COLLECTION {COUCHBASE_COLUMNAR_DATABASE}.{COUCHBASE_COLUMNAR_SCOPE}.{collection_name} IF NOT EXISTS PRIMARY KEY(pk: UUID) AUTOGENERATED;" + self.get_cluster().execute_query( + create_cluster_query, + QueryOptions(timeout=timedelta(seconds=self.offline_store_config.timeout)), + ) + + values_list = df.apply(format_row, axis=1).apply(json.dumps).tolist() + values_clause = ",\n ".join(values_list) + + insert_query = f""" + INSERT INTO `{COUCHBASE_COLUMNAR_DATABASE}`.`{COUCHBASE_COLUMNAR_SCOPE}`.`{collection_name}` ([ + {values_clause} + ]) + """ + self.get_cluster().execute_query( + insert_query, + QueryOptions(timeout=timedelta(seconds=self.offline_store_config.timeout)), + ) + + self.collections.append(collection_name) + + return CouchbaseColumnarSource( + name=collection_name, + query=f"SELECT VALUE v FROM {COUCHBASE_COLUMNAR_DATABASE}.{COUCHBASE_COLUMNAR_SCOPE}.`{collection_name}` v", + database=COUCHBASE_COLUMNAR_DATABASE, + scope=COUCHBASE_COLUMNAR_SCOPE, + collection=collection_name, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + field_mapping=field_mapping or {"ts_1": "ts"}, + ) + + def create_saved_dataset_destination(self): + raise NotImplementedError + + def create_logged_features_destination(self) -> LoggingDestination: + collection = self.get_prefixed_collection_name( + f"logged_features_{str(uuid.uuid4()).replace('-', '_')}" + ) + self.collections.append(collection) + return CouchbaseColumnarLoggingDestination( + table_name=collection, + database=COUCHBASE_COLUMNAR_DATABASE, + scope=COUCHBASE_COLUMNAR_SCOPE, + ) + + def create_offline_store_config(self) -> FeastConfigBaseModel: + return self.offline_store_config + + def get_prefixed_collection_name(self, suffix: str) -> str: + return f"{self.project_name}_{suffix}" + + @classmethod + def get_dangling_collections(cls) -> List[str]: + query = """ + SELECT VALUE d.DatabaseName || '.' || d.DataverseName || '.' 
|| d.DatasetName + FROM System.Metadata.`Dataset` d + WHERE d.DataverseName <> "Metadata" + AND (REGEXP_CONTAINS(d.DatasetName, "integration_test_.*") + OR REGEXP_CONTAINS(d.DatasetName, "feast_entity_df_.*")); + """ + try: + res = cls.get_cluster().execute_query(query) + return res.get_all_rows() + except Exception as e: + print(f"Error fetching collections: {e}") + return [] + + @classmethod + def cleanup_all(cls): + if cls._shutting_down: + return + cls._shutting_down = True + try: + collections = cls.get_dangling_collections() + if len(collections) == 0: + print("No collections to clean up.") + return + + print(f"Found {len(collections)} collections to clean up.") + if len(collections) > 5: + print("This may take a few minutes...") + for collection in collections: + try: + query = f"DROP COLLECTION {collection} IF EXISTS;" + cls.get_cluster().execute_query(query) + print(f"Dropped collection: {collection}") + except Exception as e: + print(f"Error dropping collection {collection}: {e}") + finally: + print("Cleanup complete.") + cls._shutting_down = False + + def teardown(self): + for collection in self.collections: + query = f"DROP COLLECTION {COUCHBASE_COLUMNAR_DATABASE}.{COUCHBASE_COLUMNAR_SCOPE}.`{collection}` IF EXISTS;" + try: + self.get_cluster().execute_query( + query, + QueryOptions( + timeout=timedelta(seconds=self.offline_store_config.timeout) + ), + ) + print(f"Successfully dropped collection: {collection}") + except Exception as e: + print(f"Error dropping collection {collection}: {e}") + + +def cleanup_handler(signum, frame): + print("\nCleaning up dangling resources...") + try: + CouchbaseColumnarDataSourceCreator.cleanup_all() + except Exception as e: + print(f"Error during cleanup: {e}") + finally: + # Re-raise the signal to properly exit + signal.default_int_handler(signum, frame) + + +# Register both SIGINT and SIGTERM handlers +signal.signal(signal.SIGINT, cleanup_handler) +signal.signal(signal.SIGTERM, cleanup_handler) 
+atexit.register(CouchbaseColumnarDataSourceCreator.cleanup_all) diff --git a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres.py b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres.py index 5239cfb474d..ec6b713941c 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres.py +++ b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres.py @@ -156,7 +156,7 @@ def query_generator() -> Iterator[str]: # Hack for query_context.entity_selections to support uppercase in columns for context in query_context_dict: context["entity_selections"] = [ - f""""{entity_selection.replace(' AS ', '" AS "')}\"""" + f""""{entity_selection.replace(" AS ", '" AS "')}\"""" for entity_selection in context["entity_selections"] ] @@ -370,7 +370,7 @@ def build_point_in_time_query( final_output_feature_names.extend( [ ( - f'{fv["name"]}__{fv["field_mapping"].get(feature, feature)}' + f"{fv['name']}__{fv['field_mapping'].get(feature, feature)}" if full_feature_names else fv["field_mapping"].get(feature, feature) ) diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py index aeb9e3cd68b..41c180f5c3c 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py +++ b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py @@ -99,6 +99,8 @@ def pull_latest_from_table_or_query( fields_as_string = ", ".join(fields_with_aliases) aliases_as_string = ", ".join(aliases) + date_partition_column = data_source.date_partition_column + start_date_str = _format_datetime(start_date) end_date_str = _format_datetime(end_date) query = f""" @@ -109,7 +111,7 @@ def pull_latest_from_table_or_query( SELECT {fields_as_string}, ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS feast_row_ FROM {from_expression} t1 - WHERE 
{timestamp_field} BETWEEN TIMESTAMP('{start_date_str}') AND TIMESTAMP('{end_date_str}') + WHERE {timestamp_field} BETWEEN TIMESTAMP('{start_date_str}') AND TIMESTAMP('{end_date_str}'){" AND " + date_partition_column + " >= '" + start_date.strftime("%Y-%m-%d") + "' AND " + date_partition_column + " <= '" + end_date.strftime("%Y-%m-%d") + "' " if date_partition_column != "" and date_partition_column is not None else ""} ) t2 WHERE feast_row_ = 1 """ @@ -641,8 +643,15 @@ def _cast_data_frame( {% endfor %} FROM {{ featureview.table_subquery }} WHERE {{ featureview.timestamp_field }} <= '{{ featureview.max_event_timestamp }}' + {% if featureview.date_partition_column != "" and featureview.date_partition_column is not none %} + AND {{ featureview.date_partition_column }} <= '{{ featureview.max_event_timestamp[:10] }}' + {% endif %} + {% if featureview.ttl == 0 %}{% else %} AND {{ featureview.timestamp_field }} >= '{{ featureview.min_event_timestamp }}' + {% if featureview.date_partition_column != "" and featureview.date_partition_column is not none %} + AND {{ featureview.date_partition_column }} >= '{{ featureview.min_event_timestamp[:10] }}' + {% endif %} {% endif %} ), diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py index 209e3b87e8b..7ad331239ff 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py +++ b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py @@ -45,6 +45,7 @@ def __init__( tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", timestamp_field: Optional[str] = None, + date_partition_column: Optional[str] = None, ): """Creates a SparkSource object. @@ -64,6 +65,8 @@ def __init__( maintainer. timestamp_field: Event timestamp field used for point-in-time joins of feature values. 
+ date_partition_column: The column to partition the data on for faster + retrieval. This is useful for large tables and will limit the number ofi """ # If no name, use the table as the default name. if name is None and table is None: @@ -77,6 +80,7 @@ def __init__( created_timestamp_column=created_timestamp_column, field_mapping=field_mapping, description=description, + date_partition_column=date_partition_column, tags=tags, owner=owner, ) @@ -135,6 +139,7 @@ def from_proto(data_source: DataSourceProto) -> Any: query=spark_options.query, path=spark_options.path, file_format=spark_options.file_format, + date_partition_column=data_source.date_partition_column, timestamp_field=data_source.timestamp_field, created_timestamp_column=data_source.created_timestamp_column, description=data_source.description, @@ -148,6 +153,7 @@ def to_proto(self) -> DataSourceProto: type=DataSourceProto.BATCH_SPARK, data_source_class_type="feast.infra.offline_stores.contrib.spark_offline_store.spark_source.SparkSource", field_mapping=self.field_mapping, + date_partition_column=self.date_partition_column, spark_options=self.spark_options.to_proto(), description=self.description, tags=self.tags, diff --git a/sdk/python/feast/infra/offline_stores/contrib/trino_offline_store/trino.py b/sdk/python/feast/infra/offline_stores/contrib/trino_offline_store/trino.py index b034d4f9923..9667f4e4720 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/trino_offline_store/trino.py +++ b/sdk/python/feast/infra/offline_stores/contrib/trino_offline_store/trino.py @@ -65,8 +65,8 @@ class JWTAuthModel(FeastConfigBaseModel): class CertificateAuthModel(FeastConfigBaseModel): - cert: FilePath = Field(default=None, alias="cert-file") - key: FilePath = Field(default=None, alias="key-file") + cert: Optional[FilePath] = Field(default=None, alias="cert-file") + key: Optional[FilePath] = Field(default=None, alias="key-file") CLASSES_BY_AUTH_TYPE = { diff --git a/sdk/python/feast/infra/offline_stores/dask.py 
b/sdk/python/feast/infra/offline_stores/dask.py index d26e8609bae..01efc492f7c 100644 --- a/sdk/python/feast/infra/offline_stores/dask.py +++ b/sdk/python/feast/infra/offline_stores/dask.py @@ -100,11 +100,9 @@ def persist( # Check if the specified location already exists. if not allow_overwrite and os.path.exists(storage.file_options.uri): raise SavedDatasetLocationAlreadyExists(location=storage.file_options.uri) - - if not Path(storage.file_options.uri).is_absolute(): - absolute_path = Path(self.repo_path) / storage.file_options.uri - else: - absolute_path = Path(storage.file_options.uri) + absolute_path = FileSource.get_uri_for_file_path( + repo_path=self.repo_path, uri=storage.file_options.uri + ) filesystem, path = FileSource.create_filesystem_and_path( str(absolute_path), @@ -193,9 +191,7 @@ def evaluate_historical_retrieval(): ): # Make sure all event timestamp fields are tz-aware. We default tz-naive fields to UTC entity_df_with_features[entity_df_event_timestamp_col] = ( - entity_df_with_features[ - entity_df_event_timestamp_col - ].apply( + entity_df_with_features[entity_df_event_timestamp_col].apply( lambda x: x if x.tzinfo is not None else x.replace(tzinfo=timezone.utc) diff --git a/sdk/python/feast/infra/offline_stores/duckdb.py b/sdk/python/feast/infra/offline_stores/duckdb.py index e64da029a6a..b2e3c03cb55 100644 --- a/sdk/python/feast/infra/offline_stores/duckdb.py +++ b/sdk/python/feast/infra/offline_stores/duckdb.py @@ -51,10 +51,9 @@ def _write_data_source( file_options = data_source.file_options - if not Path(file_options.uri).is_absolute(): - absolute_path = Path(repo_path) / file_options.uri - else: - absolute_path = Path(file_options.uri) + absolute_path = FileSource.get_uri_for_file_path( + repo_path=repo_path, uri=file_options.uri + ) if ( mode == "overwrite" diff --git a/sdk/python/feast/infra/offline_stores/file_source.py b/sdk/python/feast/infra/offline_stores/file_source.py index 5912cbdf3fb..af33338265b 100644 --- 
a/sdk/python/feast/infra/offline_stores/file_source.py +++ b/sdk/python/feast/infra/offline_stores/file_source.py @@ -1,5 +1,6 @@ from pathlib import Path -from typing import Callable, Dict, Iterable, List, Optional, Tuple +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union +from urllib.parse import urlparse import pyarrow from packaging import version @@ -154,17 +155,21 @@ def validate(self, config: RepoConfig): def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]: return type_map.pa_to_feast_value_type + @staticmethod + def get_uri_for_file_path(repo_path: Union[Path, str, None], uri: str) -> str: + parsed_uri = urlparse(uri) + if parsed_uri.scheme and parsed_uri.netloc: + return uri # Keep remote URIs as they are + if repo_path is not None and not Path(uri).is_absolute(): + return str(Path(repo_path) / uri) + return str(Path(uri)) + def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: - if ( - config.repo_path is not None - and not Path(self.file_options.uri).is_absolute() - ): - absolute_path = config.repo_path / self.file_options.uri - else: - absolute_path = Path(self.file_options.uri) - + absolute_path = self.get_uri_for_file_path( + repo_path=config.repo_path, uri=self.file_options.uri + ) filesystem, path = FileSource.create_filesystem_and_path( str(absolute_path), self.file_options.s3_endpoint_override ) diff --git a/sdk/python/feast/infra/offline_stores/offline_utils.py b/sdk/python/feast/infra/offline_stores/offline_utils.py index 2d4fa268e40..5b12636782f 100644 --- a/sdk/python/feast/infra/offline_stores/offline_utils.py +++ b/sdk/python/feast/infra/offline_stores/offline_utils.py @@ -118,6 +118,10 @@ def get_feature_view_query_context( query_context = [] for feature_view, features in feature_views_to_feature_map.items(): + reverse_field_mapping = { + v: k for k, v in feature_view.batch_source.field_mapping.items() + } + join_keys: List[str] = [] entity_selections: 
List[str] = [] for entity_column in feature_view.entity_columns: @@ -125,16 +129,16 @@ def get_feature_view_query_context( entity_column.name, entity_column.name ) join_keys.append(join_key) - entity_selections.append(f"{entity_column.name} AS {join_key}") + entity_selections.append( + f"{reverse_field_mapping.get(entity_column.name, entity_column.name)} " + f"AS {join_key}" + ) if isinstance(feature_view.ttl, timedelta): ttl_seconds = int(feature_view.ttl.total_seconds()) else: ttl_seconds = 0 - reverse_field_mapping = { - v: k for k, v in feature_view.batch_source.field_mapping.items() - } features = [reverse_field_mapping.get(feature, feature) for feature in features] timestamp_field = reverse_field_mapping.get( feature_view.batch_source.timestamp_field, @@ -186,7 +190,9 @@ def build_point_in_time_query( full_feature_names: bool = False, ) -> str: """Build point-in-time query between each feature view table and the entity dataframe for Bigquery and Redshift""" - template = Environment(loader=BaseLoader()).from_string(source=query_template) + env = Environment(loader=BaseLoader()) + env.filters["backticks"] = enclose_in_backticks + template = env.from_string(source=query_template) final_output_feature_names = list(entity_df_columns) final_output_feature_names.extend( @@ -252,3 +258,11 @@ def get_pyarrow_schema_from_batch_source( column_names.append(column_name) return pa.schema(pa_schema), column_names + + +def enclose_in_backticks(value): + # Check if the input is a list + if isinstance(value, list): + return [f"`{v}`" for v in value] + else: + return f"`{value}`" diff --git a/sdk/python/feast/infra/offline_stores/remote.py b/sdk/python/feast/infra/offline_stores/remote.py index 7ee018ac6d9..d11fb4673db 100644 --- a/sdk/python/feast/infra/offline_stores/remote.py +++ b/sdk/python/feast/infra/offline_stores/remote.py @@ -70,22 +70,45 @@ def list_actions(self, options: FlightCallOptions = None): return super().list_actions(options) -def 
build_arrow_flight_client(host: str, port, auth_config: AuthConfig): +def build_arrow_flight_client( + scheme: str, host: str, port, auth_config: AuthConfig, cert: str = "" +): + arrow_scheme = "grpc+tcp" + if scheme == "https": + logger.info( + "Scheme is https so going to connect offline server in SSL(TLS) mode." + ) + arrow_scheme = "grpc+tls" + + kwargs = {} + if cert: + with open(cert, "rb") as root_certs: + kwargs["tls_root_certs"] = root_certs.read() + if auth_config.type != AuthType.NONE.value: middlewares = [FlightAuthInterceptorFactory(auth_config)] - return FeastFlightClient(f"grpc://{host}:{port}", middleware=middlewares) + return FeastFlightClient( + f"{arrow_scheme}://{host}:{port}", middleware=middlewares, **kwargs + ) - return FeastFlightClient(f"grpc://{host}:{port}") + return FeastFlightClient(f"{arrow_scheme}://{host}:{port}", **kwargs) class RemoteOfflineStoreConfig(FeastConfigBaseModel): type: Literal["remote"] = "remote" + + scheme: Literal["http", "https"] = "http" + host: StrictStr """ str: remote offline store server port, e.g. the host URL for offline store of arrow flight server. """ port: Optional[StrictInt] = None """ str: remote offline store server port.""" + cert: StrictStr = "" + """ str: Path to the public certificate when the offline server starts in TLS(SSL) mode. This may be needed if the offline server started with a self-signed certificate, typically this file ends with `*.crt`, `*.cer`, or `*.pem`. + If type is 'remote', then this configuration is needed to connect to remote offline server in TLS mode. 
""" + class RemoteRetrievalJob(RetrievalJob): def __init__( @@ -178,7 +201,11 @@ def get_historical_features( assert isinstance(config.offline_store, RemoteOfflineStoreConfig) client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + scheme=config.offline_store.scheme, + host=config.offline_store.host, + port=config.offline_store.port, + auth_config=config.auth_config, + cert=config.offline_store.cert, ) feature_view_names = [fv.name for fv in feature_views] @@ -214,7 +241,11 @@ def pull_all_from_table_or_query( # Initialize the client connection client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + scheme=config.offline_store.scheme, + host=config.offline_store.host, + port=config.offline_store.port, + auth_config=config.auth_config, + cert=config.offline_store.cert, ) api_parameters = { @@ -247,7 +278,11 @@ def pull_latest_from_table_or_query( # Initialize the client connection client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + config.offline_store.scheme, + config.offline_store.host, + config.offline_store.port, + config.auth_config, + cert=config.offline_store.cert, ) api_parameters = { @@ -282,7 +317,11 @@ def write_logged_features( # Initialize the client connection client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + config.offline_store.scheme, + config.offline_store.host, + config.offline_store.port, + config.auth_config, + config.offline_store.cert, ) api_parameters = { @@ -308,7 +347,11 @@ def offline_write_batch( # Initialize the client connection client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + config.offline_store.scheme, + config.offline_store.host, + config.offline_store.port, + config.auth_config, + config.offline_store.cert, ) feature_view_names = 
[feature_view.name] @@ -336,7 +379,11 @@ def validate_data_source( assert isinstance(config.offline_store, RemoteOfflineStoreConfig) client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + config.offline_store.scheme, + config.offline_store.host, + config.offline_store.port, + config.auth_config, + config.offline_store.cert, ) api_parameters = { @@ -357,7 +404,11 @@ def get_table_column_names_and_types_from_data_source( assert isinstance(config.offline_store, RemoteOfflineStoreConfig) client = build_arrow_flight_client( - config.offline_store.host, config.offline_store.port, config.auth_config + config.offline_store.scheme, + config.offline_store.host, + config.offline_store.port, + config.auth_config, + config.offline_store.cert, ) api_parameters = { diff --git a/sdk/python/feast/infra/offline_stores/snowflake.py b/sdk/python/feast/infra/offline_stores/snowflake.py index 3d23682769b..101685cec6f 100644 --- a/sdk/python/feast/infra/offline_stores/snowflake.py +++ b/sdk/python/feast/infra/offline_stores/snowflake.py @@ -716,8 +716,8 @@ def _get_entity_df_event_timestamp_range( MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """ /* - Compute a deterministic hash for the `left_table_query_string` that will be used throughout - all the logic as the field to GROUP BY the data + 0. Compute a deterministic hash for the `left_table_query_string` that will be used throughout + all the logic as the field to GROUP BY the data. */ WITH "entity_dataframe" AS ( SELECT *, @@ -739,6 +739,10 @@ def _get_entity_df_event_timestamp_range( {% for featureview in featureviews %} +/* + 1. Only select the required columns with entities of the featureview. 
+*/ + "{{ featureview.name }}__entity_dataframe" AS ( SELECT {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %} @@ -752,20 +756,7 @@ def _get_entity_df_event_timestamp_range( ), /* - This query template performs the point-in-time correctness join for a single feature set table - to the provided entity table. - - 1. We first join the current feature_view to the entity dataframe that has been passed. - This JOIN has the following logic: - - For each row of the entity dataframe, only keep the rows where the `timestamp_field` - is less than the one provided in the entity dataframe - - If there a TTL for the current feature_view, also keep the rows where the `timestamp_field` - is higher the the one provided minus the TTL - - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been - computed previously - - The output of this CTE will contain all the necessary information and already filtered out most - of the data that is not relevant. +2. Use subquery to prepare event_timestamp, created_timestamp, entity columns and feature columns. 
*/ "{{ featureview.name }}__subquery" AS ( @@ -777,94 +768,61 @@ def _get_entity_df_event_timestamp_range( "{{ feature }}" as {% if full_feature_names %}"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}"{% else %}"{{ featureview.field_mapping.get(feature, feature) }}"{% endif %}{% if loop.last %}{% else %}, {% endif %} {% endfor %} FROM {{ featureview.table_subquery }} - WHERE "{{ featureview.timestamp_field }}" <= '{{ featureview.max_event_timestamp }}' - {% if featureview.ttl == 0 %}{% else %} - AND "{{ featureview.timestamp_field }}" >= '{{ featureview.min_event_timestamp }}' - {% endif %} -), - -"{{ featureview.name }}__base" AS ( - SELECT - "subquery".*, - "entity_dataframe"."entity_timestamp", - "entity_dataframe"."{{featureview.name}}__entity_row_unique_id" - FROM "{{ featureview.name }}__subquery" AS "subquery" - INNER JOIN "{{ featureview.name }}__entity_dataframe" AS "entity_dataframe" - ON TRUE - AND "subquery"."event_timestamp" <= "entity_dataframe"."entity_timestamp" - - {% if featureview.ttl == 0 %}{% else %} - AND "subquery"."event_timestamp" >= TIMESTAMPADD(second,-{{ featureview.ttl }},"entity_dataframe"."entity_timestamp") - {% endif %} - - {% for entity in featureview.entities %} - AND "subquery"."{{ entity }}" = "entity_dataframe"."{{ entity }}" - {% endfor %} ), /* - 2. If the `created_timestamp_column` has been set, we need to - deduplicate the data first. This is done by calculating the - `MAX(created_at_timestamp)` for each event_timestamp. - We then join the data on the next CTE +3. If the `created_timestamp_column` has been set, we need to +deduplicate the data first. This is done by calculating the +`MAX(created_at_timestamp)` for each event_timestamp and joining back on the subquery. 
+Otherwise, the ASOF JOIN can have unstable side effects +https://docs.snowflake.com/en/sql-reference/constructs/asof-join#expected-behavior-when-ties-exist-in-the-right-table */ + {% if featureview.created_timestamp_column %} "{{ featureview.name }}__dedup" AS ( - SELECT - "{{featureview.name}}__entity_row_unique_id", - "event_timestamp", - MAX("created_timestamp") AS "created_timestamp" - FROM "{{ featureview.name }}__base" - GROUP BY "{{featureview.name}}__entity_row_unique_id", "event_timestamp" + SELECT * + FROM "{{ featureview.name }}__subquery" + INNER JOIN ( + SELECT + {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %} + "event_timestamp", + MAX("created_timestamp") AS "created_timestamp" + FROM "{{ featureview.name }}__subquery" + GROUP BY {{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %} "event_timestamp" + ) + USING({{ featureview.entities | map('tojson') | join(', ')}}{% if featureview.entities %},{% else %}{% endif %} "event_timestamp", "created_timestamp") ), {% endif %} /* - 3. The data has been filtered during the first CTE "*__base" - Thus we only need to compute the latest timestamp of each feature. +4. Make ASOF JOIN of deduplicated feature CTE on reduced entity dataframe. 
*/ -"{{ featureview.name }}__latest" AS ( + +"{{ featureview.name }}__asof_join" AS ( SELECT - "event_timestamp", - {% if featureview.created_timestamp_column %}"created_timestamp",{% endif %} - "{{featureview.name}}__entity_row_unique_id" - FROM - ( - SELECT *, - ROW_NUMBER() OVER( - PARTITION BY "{{featureview.name}}__entity_row_unique_id" - ORDER BY "event_timestamp" DESC{% if featureview.created_timestamp_column %},"created_timestamp" DESC{% endif %} - ) AS "row_number" - FROM "{{ featureview.name }}__base" - {% if featureview.created_timestamp_column %} - INNER JOIN "{{ featureview.name }}__dedup" - USING ("{{featureview.name}}__entity_row_unique_id", "event_timestamp", "created_timestamp") - {% endif %} - ) - WHERE "row_number" = 1 + e.*, + v.* + FROM "{{ featureview.name }}__entity_dataframe" e + ASOF JOIN {% if featureview.created_timestamp_column %}"{{ featureview.name }}__dedup"{% else %}"{{ featureview.name }}__subquery"{% endif %} v + MATCH_CONDITION (e."entity_timestamp" >= v."event_timestamp") + {% if featureview.entities %} USING({{ featureview.entities | map('tojson') | join(', ')}}) {% endif %} ), /* - 4. Once we know the latest value of each feature for a given timestamp, - we can join again the data back to the original "base" dataset +5. If TTL is configured filter the CTE to remove rows where the feature values are older than the configured ttl. 
*/ -"{{ featureview.name }}__cleaned" AS ( - SELECT "base".* - FROM "{{ featureview.name }}__base" AS "base" - INNER JOIN "{{ featureview.name }}__latest" - USING( - "{{featureview.name}}__entity_row_unique_id", - "event_timestamp" - {% if featureview.created_timestamp_column %} - ,"created_timestamp" - {% endif %} - ) -){% if loop.last %}{% else %}, {% endif %} +"{{ featureview.name }}__ttl" AS ( + SELECT * + FROM "{{ featureview.name }}__asof_join" + {% if featureview.ttl == 0 %}{% else %} + WHERE "event_timestamp" >= TIMESTAMPADD(second,-{{ featureview.ttl }},"entity_timestamp") + {% endif %} +){% if loop.last %}{% else %}, {% endif %} {% endfor %} /* - Joins the outputs of multiple time travel joins to a single table. + Join the outputs of multiple time travel joins to a single table. The entity_dataframe dataset being our source of truth here. */ @@ -877,7 +835,7 @@ def _get_entity_df_event_timestamp_range( {% for feature in featureview.features %} ,{% if full_feature_names %}"{{ featureview.name }}__{{featureview.field_mapping.get(feature, feature)}}"{% else %}"{{ featureview.field_mapping.get(feature, feature) }}"{% endif %} {% endfor %} - FROM "{{ featureview.name }}__cleaned" -) "{{ featureview.name }}__cleaned" USING ("{{featureview.name}}__entity_row_unique_id") + FROM "{{ featureview.name }}__ttl" +) "{{ featureview.name }}__ttl" USING ("{{featureview.name}}__entity_row_unique_id") {% endfor %} """ diff --git a/sdk/python/feast/infra/offline_stores/snowflake_source.py b/sdk/python/feast/infra/offline_stores/snowflake_source.py index 1d43fecc03c..b4fcd89af7e 100644 --- a/sdk/python/feast/infra/offline_stores/snowflake_source.py +++ b/sdk/python/feast/infra/offline_stores/snowflake_source.py @@ -285,15 +285,15 @@ def get_table_column_names_and_types( row["snowflake_type"] = "NUMBERwSCALE" elif row["type_code"] in [5, 9, 12]: - error = snowflake_unsupported_map[row["type_code"]] + datatype = snowflake_unsupported_map[row["type_code"]] raise 
NotImplementedError( - f"The following Snowflake Data Type is not supported: {error}" + f"The datatype of column {row['column_name']} is of type {datatype} in datasource {query}. This type is not supported. Try converting to VARCHAR." ) elif row["type_code"] in [1, 2, 3, 4, 6, 7, 8, 10, 11, 13]: row["snowflake_type"] = snowflake_type_code_map[row["type_code"]] else: raise NotImplementedError( - f"The following Snowflake Column is not supported: {row['column_name']} (type_code: {row['type_code']})" + f"The datatype of column {row['column_name']} in datasource {query} is not supported." ) return [ @@ -317,9 +317,9 @@ def get_table_column_names_and_types( } snowflake_unsupported_map = { - 5: "VARIANT -- Try converting to VARCHAR", - 9: "OBJECT -- Try converting to VARCHAR", - 12: "TIME -- Try converting to VARCHAR", + 5: "VARIANT", + 9: "OBJECT", + 12: "TIME", } python_int_to_snowflake_type_map = { diff --git a/sdk/python/feast/infra/online_stores/couchbase_online_store/README.md b/sdk/python/feast/infra/online_stores/couchbase_online_store/README.md index df1b7a1382d..8f95884fe03 100644 --- a/sdk/python/feast/infra/online_stores/couchbase_online_store/README.md +++ b/sdk/python/feast/infra/online_stores/couchbase_online_store/README.md @@ -28,14 +28,14 @@ cd feature_repo #### Edit `feature_store.yaml` -Set the `online_store` type to `couchbase`, and fill in the required fields as shown below. +Set the `online_store` type to `couchbase.online`, and fill in the required fields as shown below. 
```yaml project: feature_repo registry: data/registry.db provider: local online_store: - type: couchbase + type: couchbase.online connection_string: couchbase://127.0.0.1 # Couchbase connection string, copied from 'Connect' page in Couchbase Capella console user: Administrator # Couchbase username from access credentials password: password # Couchbase password from access credentials diff --git a/sdk/python/feast/infra/online_stores/couchbase_online_store/couchbase.py b/sdk/python/feast/infra/online_stores/couchbase_online_store/couchbase.py index 91ce56a5caf..c80f9e1285c 100644 --- a/sdk/python/feast/infra/online_stores/couchbase_online_store/couchbase.py +++ b/sdk/python/feast/infra/online_stores/couchbase_online_store/couchbase.py @@ -31,7 +31,7 @@ class CouchbaseOnlineStoreConfig(FeastConfigBaseModel): Configuration for the Couchbase online store. """ - type: Literal["couchbase"] = "couchbase" + type: Literal["couchbase.online"] = "couchbase.online" connection_string: Optional[StrictStr] = None user: Optional[StrictStr] = None diff --git a/sdk/python/feast/infra/online_stores/elasticsearch_online_store/elasticsearch.py b/sdk/python/feast/infra/online_stores/elasticsearch_online_store/elasticsearch.py index 0152ca330c9..af328141520 100644 --- a/sdk/python/feast/infra/online_stores/elasticsearch_online_store/elasticsearch.py +++ b/sdk/python/feast/infra/online_stores/elasticsearch_online_store/elasticsearch.py @@ -213,7 +213,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], embedding: List[float], top_k: int, *args, diff --git a/sdk/python/feast/infra/online_stores/faiss_online_store.py b/sdk/python/feast/infra/online_stores/faiss_online_store.py index cc2e75800e6..fd4d6768abd 100644 --- a/sdk/python/feast/infra/online_stores/faiss_online_store.py +++ b/sdk/python/feast/infra/online_stores/faiss_online_store.py @@ -176,7 +176,8 
@@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_featres: Optional[List[str]], embedding: List[float], top_k: int, distance_metric: Optional[str] = None, diff --git a/sdk/python/feast/infra/online_stores/milvus_online_store/__init__.py b/sdk/python/feast/infra/online_stores/milvus_online_store/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/infra/online_stores/milvus_online_store/milvus.py b/sdk/python/feast/infra/online_stores/milvus_online_store/milvus.py new file mode 100644 index 00000000000..91e432a74fa --- /dev/null +++ b/sdk/python/feast/infra/online_stores/milvus_online_store/milvus.py @@ -0,0 +1,632 @@ +from datetime import datetime +from pathlib import Path +from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union + +from pydantic import StrictStr +from pymilvus import ( + CollectionSchema, + DataType, + FieldSchema, + MilvusClient, +) + +from feast import Entity +from feast.feature_view import FeatureView +from feast.infra.infra_object import InfraObject +from feast.infra.key_encoding_utils import ( + deserialize_entity_key, + serialize_entity_key, +) +from feast.infra.online_stores.online_store import OnlineStore +from feast.infra.online_stores.vector_store import VectorStoreConfig +from feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto +from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto +from feast.protos.feast.types.Value_pb2 import Value as ValueProto +from feast.repo_config import FeastConfigBaseModel, RepoConfig +from feast.type_map import ( + PROTO_VALUE_TO_VALUE_TYPE_MAP, + VALUE_TYPE_TO_PROTO_VALUE_MAP, + feast_value_type_to_python_type, +) +from feast.types import ( + VALUE_TYPES_TO_FEAST_TYPES, + Array, + ComplexFeastType, + PrimitiveFeastType, + ValueType, + from_feast_type, +) +from feast.utils import ( + 
_serialize_vector_to_float_list, + to_naive_utc, +) + +PROTO_TO_MILVUS_TYPE_MAPPING: Dict[ValueType, DataType] = { + PROTO_VALUE_TO_VALUE_TYPE_MAP["bytes_val"]: DataType.VARCHAR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["bool_val"]: DataType.BOOL, + PROTO_VALUE_TO_VALUE_TYPE_MAP["string_val"]: DataType.VARCHAR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["float_val"]: DataType.FLOAT, + PROTO_VALUE_TO_VALUE_TYPE_MAP["double_val"]: DataType.DOUBLE, + PROTO_VALUE_TO_VALUE_TYPE_MAP["int32_val"]: DataType.INT32, + PROTO_VALUE_TO_VALUE_TYPE_MAP["int64_val"]: DataType.INT64, + PROTO_VALUE_TO_VALUE_TYPE_MAP["float_list_val"]: DataType.FLOAT_VECTOR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["int32_list_val"]: DataType.FLOAT_VECTOR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["int64_list_val"]: DataType.FLOAT_VECTOR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["double_list_val"]: DataType.FLOAT_VECTOR, + PROTO_VALUE_TO_VALUE_TYPE_MAP["bool_list_val"]: DataType.BINARY_VECTOR, +} + +FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING: Dict[ + Union[PrimitiveFeastType, Array, ComplexFeastType], DataType +] = {} + +for value_type, feast_type in VALUE_TYPES_TO_FEAST_TYPES.items(): + if isinstance(feast_type, PrimitiveFeastType): + milvus_type = PROTO_TO_MILVUS_TYPE_MAPPING.get(value_type) + if milvus_type: + FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING[feast_type] = milvus_type + elif isinstance(feast_type, Array): + base_type = feast_type.base_type + base_value_type = base_type.to_value_type() + if base_value_type in [ + ValueType.INT32, + ValueType.INT64, + ValueType.FLOAT, + ValueType.DOUBLE, + ]: + FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING[feast_type] = DataType.FLOAT_VECTOR + elif base_value_type == ValueType.STRING: + FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING[feast_type] = DataType.VARCHAR + elif base_value_type == ValueType.BOOL: + FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING[feast_type] = DataType.BINARY_VECTOR + + +class MilvusOnlineStoreConfig(FeastConfigBaseModel, VectorStoreConfig): + """ + Configuration for the Milvus online store. 
+ NOTE: The class *must* end with the `OnlineStoreConfig` suffix. + """ + + type: Literal["milvus"] = "milvus" + path: Optional[StrictStr] = "data/online_store.db" + host: Optional[StrictStr] = "localhost" + port: Optional[int] = 19530 + index_type: Optional[str] = "FLAT" + metric_type: Optional[str] = "COSINE" + embedding_dim: Optional[int] = 128 + vector_enabled: Optional[bool] = True + nlist: Optional[int] = 128 + username: Optional[StrictStr] = "" + password: Optional[StrictStr] = "" + + +class MilvusOnlineStore(OnlineStore): + """ + Milvus implementation of the online store interface. + + Attributes: + _collections: Dictionary to cache Milvus collections. + """ + + client: Optional[MilvusClient] = None + _collections: Dict[str, Any] = {} + + def _get_db_path(self, config: RepoConfig) -> str: + assert ( + config.online_store.type == "milvus" + or config.online_store.type.endswith("MilvusOnlineStore") + ) + + if config.repo_path and not Path(config.online_store.path).is_absolute(): + db_path = str(config.repo_path / config.online_store.path) + else: + db_path = config.online_store.path + return db_path + + def _connect(self, config: RepoConfig) -> MilvusClient: + if not self.client: + if config.provider == "local": + db_path = self._get_db_path(config) + print(f"Connecting to Milvus in local mode using {db_path}") + self.client = MilvusClient(db_path) + else: + self.client = MilvusClient( + url=f"{config.online_store.host}:{config.online_store.port}", + token=f"{config.online_store.username}:{config.online_store.password}" + if config.online_store.username and config.online_store.password + else "", + ) + return self.client + + def _get_or_create_collection( + self, config: RepoConfig, table: FeatureView + ) -> Dict[str, Any]: + self.client = self._connect(config) + vector_field_dict = {k.name: k for k in table.schema if k.vector_index} + collection_name = _table_id(config.project, table) + if collection_name not in self._collections: + # Create a composite key 
by combining entity fields + composite_key_name = _get_composite_key_name(table) + + fields = [ + FieldSchema( + name=composite_key_name, + dtype=DataType.VARCHAR, + max_length=512, + is_primary=True, + ), + FieldSchema(name="event_ts", dtype=DataType.INT64), + FieldSchema(name="created_ts", dtype=DataType.INT64), + ] + fields_to_exclude = [ + "event_ts", + "created_ts", + ] + fields_to_add = [f for f in table.schema if f.name not in fields_to_exclude] + for field in fields_to_add: + dtype = FEAST_PRIMITIVE_TO_MILVUS_TYPE_MAPPING.get(field.dtype) + if dtype: + if dtype == DataType.FLOAT_VECTOR: + fields.append( + FieldSchema( + name=field.name, + dtype=dtype, + dim=config.online_store.embedding_dim, + ) + ) + else: + fields.append( + FieldSchema( + name=field.name, + dtype=DataType.VARCHAR, + max_length=512, + ) + ) + + schema = CollectionSchema( + fields=fields, description="Feast feature view data" + ) + collection_exists = self.client.has_collection( + collection_name=collection_name + ) + if not collection_exists: + self.client.create_collection( + collection_name=collection_name, + dimension=config.online_store.embedding_dim, + schema=schema, + ) + index_params = self.client.prepare_index_params() + for vector_field in schema.fields: + if ( + vector_field.dtype + in [ + DataType.FLOAT_VECTOR, + DataType.BINARY_VECTOR, + ] + and vector_field.name in vector_field_dict + ): + metric = vector_field_dict[ + vector_field.name + ].vector_search_metric + index_params.add_index( + collection_name=collection_name, + field_name=vector_field.name, + metric_type=metric or config.online_store.metric_type, + index_type=config.online_store.index_type, + index_name=f"vector_index_{vector_field.name}", + params={"nlist": config.online_store.nlist}, + ) + self.client.create_index( + collection_name=collection_name, + index_params=index_params, + ) + else: + self.client.load_collection(collection_name) + self._collections[collection_name] = self.client.describe_collection( + 
collection_name + ) + return self._collections[collection_name] + + def online_write_batch( + self, + config: RepoConfig, + table: FeatureView, + data: List[ + Tuple[ + EntityKeyProto, + Dict[str, ValueProto], + datetime, + Optional[datetime], + ] + ], + progress: Optional[Callable[[int], Any]], + ) -> None: + self.client = self._connect(config) + collection = self._get_or_create_collection(config, table) + vector_cols = [f.name for f in table.features if f.vector_index] + entity_batch_to_insert = [] + for entity_key, values_dict, timestamp, created_ts in data: + # need to construct the composite primary key also need to handle the fact that entities are a list + entity_key_str = serialize_entity_key( + entity_key, + entity_key_serialization_version=config.entity_key_serialization_version, + ).hex() + # to recover the entity key just run: + # deserialize_entity_key(bytes.fromhex(entity_key_str), entity_key_serialization_version=3) + composite_key_name = _get_composite_key_name(table) + + timestamp_int = int(to_naive_utc(timestamp).timestamp() * 1e6) + created_ts_int = ( + int(to_naive_utc(created_ts).timestamp() * 1e6) if created_ts else 0 + ) + entity_dict = { + join_key: feast_value_type_to_python_type(value) + for join_key, value in zip( + entity_key.join_keys, entity_key.entity_values + ) + } + values_dict.update(entity_dict) + values_dict = _extract_proto_values_to_dict( + values_dict, + vector_cols=vector_cols, + serialize_to_string=True, + ) + + single_entity_record = { + composite_key_name: entity_key_str, + "event_ts": timestamp_int, + "created_ts": created_ts_int, + } + single_entity_record.update(values_dict) + entity_batch_to_insert.append(single_entity_record) + + if progress: + progress(1) + + self.client.insert( + collection_name=collection["collection_name"], + data=entity_batch_to_insert, + ) + + def online_read( + self, + config: RepoConfig, + table: FeatureView, + entity_keys: List[EntityKeyProto], + requested_features: Optional[List[str]] = 
None, + full_feature_names: bool = False, + ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]: + self.client = self._connect(config) + collection_name = _table_id(config.project, table) + collection = self._get_or_create_collection(config, table) + + composite_key_name = _get_composite_key_name(table) + + output_fields = ( + [composite_key_name] + + (requested_features if requested_features else []) + + ["created_ts", "event_ts"] + ) + assert all( + field in [f["name"] for f in collection["fields"]] + for field in output_fields + ), ( + f"field(s) [{[field for field in output_fields if field not in [f['name'] for f in collection['fields']]]}] not found in collection schema" + ) + composite_entities = [] + for entity_key in entity_keys: + entity_key_str = serialize_entity_key( + entity_key, + entity_key_serialization_version=config.entity_key_serialization_version, + ).hex() + composite_entities.append(entity_key_str) + + query_filter_for_entities = ( + f"{composite_key_name} in [" + + ", ".join([f"'{e}'" for e in composite_entities]) + + "]" + ) + self.client.load_collection(collection_name) + results = self.client.query( + collection_name=collection_name, + filter=query_filter_for_entities, + output_fields=output_fields, + ) + # Group hits by composite key. + grouped_hits: Dict[str, Any] = {} + for hit in results: + key = hit.get(composite_key_name) + grouped_hits.setdefault(key, []).append(hit) + + # Map the features to their Feast types. 
+ feature_name_feast_primitive_type_map = { + f.name: f.dtype for f in table.features + } + # Build a dictionary mapping composite key -> (res_ts, res) + results_dict: Dict[ + str, Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]] + ] = {} + + # here we need to map the data stored as characters back into the protobuf value + for hit in results: + key = hit.get(composite_key_name) + # Only take one hit per composite key (adjust if you need aggregation) + if key not in results_dict: + res = {} + res_ts = None + for field in output_fields: + val = ValueProto() + field_value = hit.get(field, None) + if field_value is None and ":" in field: + _, field_short = field.split(":", 1) + field_value = hit.get(field_short) + + if field in ["created_ts", "event_ts"]: + res_ts = datetime.fromtimestamp(field_value / 1e6) + elif field == composite_key_name: + # We do not return the composite key value + pass + else: + feature_feast_primitive_type = ( + feature_name_feast_primitive_type_map.get( + field, PrimitiveFeastType.INVALID + ) + ) + feature_fv_dtype = from_feast_type(feature_feast_primitive_type) + proto_attr = VALUE_TYPE_TO_PROTO_VALUE_MAP.get(feature_fv_dtype) + if proto_attr: + if proto_attr == "bytes_val": + setattr(val, proto_attr, field_value.encode()) + elif proto_attr in [ + "int32_val", + "int64_val", + "float_val", + "double_val", + ]: + setattr( + val, + proto_attr, + type(getattr(val, proto_attr))(field_value), + ) + elif proto_attr in [ + "int32_list_val", + "int64_list_val", + "float_list_val", + "double_list_val", + ]: + setattr( + val, + proto_attr, + list( + map( + type(getattr(val, proto_attr)).__args__[0], + field_value, + ) + ), + ) + else: + setattr(val, proto_attr, field_value) + else: + raise ValueError( + f"Unsupported ValueType: {feature_feast_primitive_type} with feature view value {field_value} for feature {field} with value {field_value}" + ) + # res[field] = val + key_to_use = field.split(":", 1)[-1] if ":" in field else field + 
res[key_to_use] = val + results_dict[key] = (res_ts, res if res else None) + + # Map the results back into a list matching the original order of composite_keys. + result_list = [ + results_dict.get(key, (None, None)) for key in composite_entities + ] + + return result_list + + def update( + self, + config: RepoConfig, + tables_to_delete: Sequence[FeatureView], + tables_to_keep: Sequence[FeatureView], + entities_to_delete: Sequence[Entity], + entities_to_keep: Sequence[Entity], + partial: bool, + ): + self.client = self._connect(config) + for table in tables_to_keep: + self._collections = self._get_or_create_collection(config, table) + + for table in tables_to_delete: + collection_name = _table_id(config.project, table) + if self._collections.get(collection_name, None): + self.client.drop_collection(collection_name) + self._collections.pop(collection_name, None) + + def plan( + self, config: RepoConfig, desired_registry_proto: RegistryProto + ) -> List[InfraObject]: + raise NotImplementedError + + def teardown( + self, + config: RepoConfig, + tables: Sequence[FeatureView], + entities: Sequence[Entity], + ): + self.client = self._connect(config) + for table in tables: + collection_name = _table_id(config.project, table) + if self._collections.get(collection_name, None): + self.client.drop_collection(collection_name) + self._collections.pop(collection_name, None) + + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: List[str], + embedding: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ]: + assert embedding is not None, "Key Word Search not yet implemented for Milvus" + entity_name_feast_primitive_type_map = { + k.name: k.dtype for k in table.entity_columns + } + self.client = self._connect(config) + collection_name = 
_table_id(config.project, table) + collection = self._get_or_create_collection(config, table) + if not config.online_store.vector_enabled: + raise ValueError("Vector search is not enabled in the online store config") + + search_params = { + "metric_type": distance_metric or config.online_store.metric_type, + "params": {"nprobe": 10}, + } + + composite_key_name = _get_composite_key_name(table) + + output_fields = ( + [composite_key_name] + + (requested_features if requested_features else []) + + ["created_ts", "event_ts"] + ) + assert all( + field in [f["name"] for f in collection["fields"]] + for field in output_fields + ), ( + f"field(s) [{[field for field in output_fields if field not in [f['name'] for f in collection['fields']]]}] not found in collection schema" + ) + # Note we choose the first vector field as the field to search on. Not ideal but it's something. + ann_search_field = None + for field in collection["fields"]: + if ( + field["type"] in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR] + and field["name"] in output_fields + ): + ann_search_field = field["name"] + break + + self.client.load_collection(collection_name) + results = self.client.search( + collection_name=collection_name, + data=[embedding], + anns_field=ann_search_field, + search_params=search_params, + limit=top_k, + output_fields=output_fields, + ) + + result_list = [] + for hits in results: + for hit in hits: + res = {} + res_ts = None + entity_key_bytes = bytes.fromhex( + hit.get("entity", {}).get(composite_key_name, None) + ) + entity_key_proto = ( + deserialize_entity_key(entity_key_bytes) + if entity_key_bytes + else None + ) + for field in output_fields: + val = ValueProto() + field_value = hit.get("entity", {}).get(field, None) + # entity_key_proto = None + if field in ["created_ts", "event_ts"]: + res_ts = datetime.fromtimestamp(field_value / 1e6) + elif field == ann_search_field: + serialized_embedding = _serialize_vector_to_float_list( + embedding + ) + res[ann_search_field] = 
serialized_embedding + elif entity_name_feast_primitive_type_map.get( + field, PrimitiveFeastType.INVALID + ) in [ + PrimitiveFeastType.STRING, + PrimitiveFeastType.INT64, + PrimitiveFeastType.INT32, + PrimitiveFeastType.BYTES, + ]: + res[field] = ValueProto(string_val=field_value) + elif field == composite_key_name: + pass + elif isinstance(field_value, bytes): + val.ParseFromString(field_value) + res[field] = val + else: + val.string_val = field_value + res[field] = val + distance = hit.get("distance", None) + res["distance"] = ( + ValueProto(float_val=distance) if distance else ValueProto() + ) + result_list.append((res_ts, entity_key_proto, res if res else None)) + return result_list + + +def _table_id(project: str, table: FeatureView) -> str: + return f"{project}_{table.name}" + + +def _get_composite_key_name(table: FeatureView) -> str: + return "_".join([field.name for field in table.entity_columns]) + "_pk" + + +def _extract_proto_values_to_dict( + input_dict: Dict[str, Any], + vector_cols: List[str], + serialize_to_string=False, +) -> Dict[str, Any]: + numeric_vector_list_types = [ + k + for k in PROTO_VALUE_TO_VALUE_TYPE_MAP.keys() + if k is not None and "list" in k and "string" not in k + ] + numeric_types = [ + "double_val", + "float_val", + "int32_val", + "int64_val", + "bool_val", + ] + output_dict = {} + for feature_name, feature_values in input_dict.items(): + for proto_val_type in PROTO_VALUE_TO_VALUE_TYPE_MAP: + if not isinstance(feature_values, (int, float, str)): + if feature_values.HasField(proto_val_type): + if proto_val_type in numeric_vector_list_types: + if serialize_to_string and feature_name not in vector_cols: + vector_values = getattr( + feature_values, proto_val_type + ).SerializeToString() + else: + vector_values = getattr(feature_values, proto_val_type).val + else: + if ( + serialize_to_string + and proto_val_type not in ["string_val"] + numeric_types + ): + vector_values = feature_values.SerializeToString().decode() + else: + if not 
isinstance(feature_values, str): + vector_values = str( + getattr(feature_values, proto_val_type) + ) + else: + vector_values = getattr(feature_values, proto_val_type) + output_dict[feature_name] = vector_values + else: + if serialize_to_string: + if not isinstance(feature_values, str): + feature_values = str(feature_values) + output_dict[feature_name] = feature_values + + return output_dict diff --git a/sdk/python/feast/infra/online_stores/milvus_online_store/milvus_repo_configuration.py b/sdk/python/feast/infra/online_stores/milvus_online_store/milvus_repo_configuration.py new file mode 100644 index 00000000000..8e8402862cb --- /dev/null +++ b/sdk/python/feast/infra/online_stores/milvus_online_store/milvus_repo_configuration.py @@ -0,0 +1,12 @@ +from tests.integration.feature_repos.integration_test_repo_config import ( + IntegrationTestRepoConfig, +) +from tests.integration.feature_repos.universal.online_store.milvus import ( + MilvusOnlineStoreCreator, +) + +FULL_REPO_CONFIGS = [ + IntegrationTestRepoConfig( + online_store="milvus", online_store_creator=MilvusOnlineStoreCreator + ), +] diff --git a/sdk/python/feast/infra/online_stores/online_store.py b/sdk/python/feast/infra/online_stores/online_store.py index 789885f82bc..5111bcd47bd 100644 --- a/sdk/python/feast/infra/online_stores/online_store.py +++ b/sdk/python/feast/infra/online_stores/online_store.py @@ -187,7 +187,7 @@ def get_online_features( for table, requested_features in grouped_refs: # Get the correct set of entity values with the correct join keys. 
- table_entity_values, idxs = utils._get_unique_entities( + table_entity_values, idxs, output_len = utils._get_unique_entities( table, join_key_values, entity_name_to_join_key_map, @@ -215,6 +215,7 @@ def get_online_features( full_feature_names, requested_features, table, + output_len, ) if requested_on_demand_feature_views: @@ -274,7 +275,7 @@ async def get_online_features_async( async def query_table(table, requested_features): # Get the correct set of entity values with the correct join keys. - table_entity_values, idxs = utils._get_unique_entities( + table_entity_values, idxs, output_len = utils._get_unique_entities( table, join_key_values, entity_name_to_join_key_map, @@ -290,7 +291,7 @@ async def query_table(table, requested_features): requested_features=requested_features, ) - return idxs, read_rows + return idxs, read_rows, output_len all_responses = await asyncio.gather( *[ @@ -299,7 +300,7 @@ async def query_table(table, requested_features): ] ) - for (idxs, read_rows), (table, requested_features) in zip( + for (idxs, read_rows, output_len), (table, requested_features) in zip( all_responses, grouped_refs ): feature_data = utils._convert_rows_to_protobuf( @@ -314,6 +315,7 @@ async def query_table(table, requested_features): full_feature_names, requested_features, table, + output_len, ) if requested_on_demand_feature_views: @@ -390,7 +392,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], embedding: List[float], top_k: int, distance_metric: Optional[str] = None, @@ -411,6 +414,7 @@ def retrieve_online_documents( config: The config for the current feature store. table: The feature view whose feature values should be read. requested_feature: The name of the feature whose embeddings should be used for retrieval. + requested_features: The list of features whose embeddings should be used for retrieval. 
embedding: The embeddings to use for retrieval. top_k: The number of documents to retrieve. @@ -419,6 +423,50 @@ def retrieve_online_documents( where the first item is the event timestamp for the row, and the second item is a dict of feature name to embeddings. """ + if not requested_feature and not requested_features: + raise ValueError( + "Either requested_feature or requested_features must be specified" + ) + raise NotImplementedError( + f"Online store {self.__class__.__name__} does not support online retrieval" + ) + + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: List[str], + embedding: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ]: + """ + Retrieves online feature values for the specified embeddings. + + Args: + distance_metric: distance metric to use for retrieval. + config: The config for the current feature store. + table: The feature view whose feature values should be read. + requested_features: The list of features whose embeddings should be used for retrieval. + embedding: The embeddings to use for retrieval (optional) + top_k: The number of documents to retrieve. + query_string: The query string to search for using keyword search (bm25) (optional) + + Returns: + object: A list of top k closest documents to the specified embedding. Each item in the list is a tuple + where the first item is the event timestamp for the row, and the second item is a dict of feature + name to embeddings. 
+ """ + assert embedding is not None or query_string is not None, ( + "Either embedding or query_string must be specified" + ) raise NotImplementedError( f"Online store {self.__class__.__name__} does not support online retrieval" ) diff --git a/sdk/python/feast/infra/online_stores/postgres_online_store/postgres.py b/sdk/python/feast/infra/online_stores/postgres_online_store/postgres.py index 7c099c80ecc..4f519003d61 100644 --- a/sdk/python/feast/infra/online_stores/postgres_online_store/postgres.py +++ b/sdk/python/feast/infra/online_stores/postgres_online_store/postgres.py @@ -58,7 +58,9 @@ class PostgreSQLOnlineStore(OnlineStore): _conn_pool_async: Optional[AsyncConnectionPool] = None @contextlib.contextmanager - def _get_conn(self, config: RepoConfig) -> Generator[Connection, Any, Any]: + def _get_conn( + self, config: RepoConfig, autocommit: bool = False + ) -> Generator[Connection, Any, Any]: assert config.online_store.type == "postgres" if config.online_store.conn_type == ConnectionType.pool: @@ -66,16 +68,18 @@ def _get_conn(self, config: RepoConfig) -> Generator[Connection, Any, Any]: self._conn_pool = _get_connection_pool(config.online_store) self._conn_pool.open() connection = self._conn_pool.getconn() + connection.set_autocommit(autocommit) yield connection self._conn_pool.putconn(connection) else: if not self._conn: self._conn = _get_conn(config.online_store) + self._conn.set_autocommit(autocommit) yield self._conn @contextlib.asynccontextmanager async def _get_conn_async( - self, config: RepoConfig + self, config: RepoConfig, autocommit: bool = False ) -> AsyncGenerator[AsyncConnection, Any]: if config.online_store.conn_type == ConnectionType.pool: if not self._conn_pool_async: @@ -84,11 +88,13 @@ async def _get_conn_async( ) await self._conn_pool_async.open() connection = await self._conn_pool_async.getconn() + await connection.set_autocommit(autocommit) yield connection await self._conn_pool_async.putconn(connection) else: if not self._conn_async: 
self._conn_async = await _get_conn_async(config.online_store) + await self._conn_async.set_autocommit(autocommit) yield self._conn_async def online_write_batch( @@ -161,7 +167,7 @@ def online_read( config, table, keys, requested_features ) - with self._get_conn(config) as conn, conn.cursor() as cur: + with self._get_conn(config, autocommit=True) as conn, conn.cursor() as cur: cur.execute(query, params) rows = cur.fetchall() @@ -179,7 +185,7 @@ async def online_read_async( config, table, keys, requested_features ) - async with self._get_conn_async(config) as conn: + async with self._get_conn_async(config, autocommit=True) as conn: async with conn.cursor() as cur: await cur.execute(query, params) rows = await cur.fetchall() @@ -339,6 +345,7 @@ def teardown( for table in tables: table_name = _table_id(project, table) cur.execute(_drop_table_and_index(table_name)) + conn.commit() except Exception: logging.exception("Teardown failed") raise @@ -347,7 +354,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], embedding: List[float], top_k: int, distance_metric: Optional[str] = "L2", @@ -366,6 +374,7 @@ def retrieve_online_documents( config: Feast configuration object table: FeatureView object as the table to search requested_feature: The requested feature as the column to search + requested_features: The list of features whose embeddings should be used for retrieval. 
embedding: The query embedding to search for top_k: The number of items to return distance_metric: The distance metric to use for the search.G @@ -396,7 +405,7 @@ def retrieve_online_documents( Optional[ValueProto], ] ] = [] - with self._get_conn(config) as conn, conn.cursor() as cur: + with self._get_conn(config, autocommit=True) as conn, conn.cursor() as cur: table_name = _table_id(project, table) # Search query template to find the top k items that are closest to the given embedding diff --git a/sdk/python/feast/infra/online_stores/qdrant_online_store/qdrant.py b/sdk/python/feast/infra/online_stores/qdrant_online_store/qdrant.py index 074c52ba5e8..81652c3e2a9 100644 --- a/sdk/python/feast/infra/online_stores/qdrant_online_store/qdrant.py +++ b/sdk/python/feast/infra/online_stores/qdrant_online_store/qdrant.py @@ -69,9 +69,9 @@ def _get_client(self, config: RepoConfig) -> QdrantClient: if self._client: return self._client online_store_config = config.online_store - assert isinstance( - online_store_config, QdrantOnlineStoreConfig - ), "Invalid type for online store config" + assert isinstance(online_store_config, QdrantOnlineStoreConfig), ( + "Invalid type for online store config" + ) assert online_store_config.similarity and ( online_store_config.similarity.lower() in DISTANCE_MAPPING @@ -248,7 +248,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], embedding: List[float], top_k: int, distance_metric: Optional[str] = "cosine", diff --git a/sdk/python/feast/infra/online_stores/singlestore_online_store/singlestore.py b/sdk/python/feast/infra/online_stores/singlestore_online_store/singlestore.py index d78289c8671..a1535589542 100644 --- a/sdk/python/feast/infra/online_stores/singlestore_online_store/singlestore.py +++ b/sdk/python/feast/infra/online_stores/singlestore_online_store/singlestore.py @@ -50,6 +50,7 @@ def 
_init_conn(self, config: RepoConfig) -> Connection: password=online_store_config.password or "test", database=online_store_config.database or "feast", port=online_store_config.port or 3306, + conn_attrs={"_connector_name": "SingleStore Feast Online Store"}, autocommit=True, ) diff --git a/sdk/python/feast/infra/online_stores/sqlite.py b/sdk/python/feast/infra/online_stores/sqlite.py index 1b79b1a94ba..15ef81188b0 100644 --- a/sdk/python/feast/infra/online_stores/sqlite.py +++ b/sdk/python/feast/infra/online_stores/sqlite.py @@ -15,19 +15,22 @@ import logging import os import sqlite3 -import struct import sys -from datetime import datetime +from datetime import date, datetime from pathlib import Path from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union -from google.protobuf.internal.containers import RepeatedScalarFieldContainer from pydantic import StrictStr from feast import Entity from feast.feature_view import FeatureView +from feast.field import Field from feast.infra.infra_object import SQLITE_INFRA_OBJECT_CLASS_TYPE, InfraObject -from feast.infra.key_encoding_utils import serialize_entity_key +from feast.infra.key_encoding_utils import ( + deserialize_entity_key, + serialize_entity_key, + serialize_f32, +) from feast.infra.online_stores.online_store import OnlineStore from feast.infra.online_stores.vector_store import VectorStoreConfig from feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto @@ -36,7 +39,53 @@ from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto from feast.protos.feast.types.Value_pb2 import Value as ValueProto from feast.repo_config import FeastConfigBaseModel, RepoConfig -from feast.utils import _build_retrieve_online_document_record, to_naive_utc +from feast.type_map import feast_value_type_to_python_type +from feast.types import FEAST_VECTOR_TYPES, PrimitiveFeastType +from feast.utils import ( + _build_retrieve_online_document_record, + 
_serialize_vector_to_float_list, + to_naive_utc, +) + + +def adapt_date_iso(val: date): + """Adapt datetime.date to ISO 8601 date.""" + return val.isoformat() + + +def adapt_datetime_iso(val: datetime): + """Adapt datetime.datetime to timezone-naive ISO 8601 date.""" + return val.isoformat() + + +def adapt_datetime_epoch(val: datetime): + """Adapt datetime.datetime to Unix timestamp.""" + return int(val.timestamp()) + + +sqlite3.register_adapter(date, adapt_date_iso) +sqlite3.register_adapter(datetime, adapt_datetime_iso) +sqlite3.register_adapter(datetime, adapt_datetime_epoch) + + +def convert_date(val: bytes): + """Convert ISO 8601 date to datetime.date object.""" + return date.fromisoformat(val.decode()) + + +def convert_datetime(val: bytes): + """Convert ISO 8601 datetime to datetime.datetime object.""" + return datetime.fromisoformat(val.decode()) + + +def convert_timestamp(val: bytes): + """Convert Unix epoch timestamp to datetime.datetime object.""" + return datetime.fromtimestamp(int(val)) + + +sqlite3.register_converter("date", convert_date) +sqlite3.register_converter("datetime", convert_datetime) +sqlite3.register_converter("timestamp", convert_timestamp) class SqliteOnlineStoreConfig(FeastConfigBaseModel, VectorStoreConfig): @@ -50,6 +99,10 @@ class SqliteOnlineStoreConfig(FeastConfigBaseModel, VectorStoreConfig): path: StrictStr = "data/online.db" """ (optional) Path to sqlite db """ + vector_enabled: bool = False + vector_len: Optional[int] = None + text_search_enabled: bool = False + class SqliteOnlineStore(OnlineStore): """ @@ -75,14 +128,12 @@ def _get_db_path(config: RepoConfig) -> str: return db_path def _get_conn(self, config: RepoConfig): + enable_sqlite_vec = ( + sys.version_info[0:2] == (3, 10) and config.online_store.vector_enabled + ) if not self._conn: db_path = self._get_db_path(config) - self._conn = _initialize_conn(db_path) - if sys.version_info[0:2] == (3, 10) and config.online_store.vector_enabled: - import sqlite_vec # noqa: F401 - 
- self._conn.enable_load_extension(True) # type: ignore - sqlite_vec.load(self._conn) + self._conn = _initialize_conn(db_path, enable_sqlite_vec) return self._conn @@ -101,9 +152,8 @@ def online_write_batch( progress: Optional[Callable[[int], Any]], ) -> None: conn = self._get_conn(config) - project = config.project - + feature_type_dict = {f.name: f.dtype for f in table.features} with conn: for entity_key, values, timestamp, created_ts in data: entity_key_bin = serialize_entity_key( @@ -117,71 +167,53 @@ def online_write_batch( table_name = _table_id(project, table) for feature_name, val in values.items(): if config.online_store.vector_enabled: - vector_bin = serialize_f32( - val.float_list_val.val, config.online_store.vector_len - ) # type: ignore + if ( + feature_type_dict.get(feature_name, None) + in FEAST_VECTOR_TYPES + ): + val_bin = serialize_f32( + val.float_list_val.val, config.online_store.vector_len + ) # type: ignore + else: + val_bin = feast_value_type_to_python_type(val) conn.execute( f""" - UPDATE {table_name} - SET value = ?, vector_value = ?, event_ts = ?, created_ts = ? - WHERE (entity_key = ? AND feature_name = ?) - """, - ( - # SET - val.SerializeToString(), - vector_bin, - timestamp, - created_ts, - # WHERE - entity_key_bin, - feature_name, - ), - ) - - conn.execute( - f"""INSERT OR IGNORE INTO {table_name} - (entity_key, feature_name, value, vector_value, event_ts, created_ts) - VALUES (?, ?, ?, ?, ?, ?)""", + INSERT INTO {table_name} (entity_key, feature_name, value, vector_value, event_ts, created_ts) + VALUES (?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(entity_key, feature_name) DO UPDATE SET + value = excluded.value, + vector_value = excluded.vector_value, + event_ts = excluded.event_ts, + created_ts = excluded.created_ts; + """, ( - entity_key_bin, - feature_name, - val.SerializeToString(), - vector_bin, - timestamp, - created_ts, + entity_key_bin, # entity_key + feature_name, # feature_name + val.SerializeToString(), # value + val_bin, # vector_value + timestamp, # event_ts + created_ts, # created_ts ), ) - else: conn.execute( f""" - UPDATE {table_name} - SET value = ?, event_ts = ?, created_ts = ? - WHERE (entity_key = ? AND feature_name = ?) + INSERT INTO {table_name} (entity_key, feature_name, value, event_ts, created_ts) + VALUES (?, ?, ?, ?, ?) + ON CONFLICT(entity_key, feature_name) DO UPDATE SET + value = excluded.value, + event_ts = excluded.event_ts, + created_ts = excluded.created_ts; """, ( - # SET - val.SerializeToString(), - timestamp, - created_ts, - # WHERE - entity_key_bin, - feature_name, + entity_key_bin, # entity_key + feature_name, # feature_name + val.SerializeToString(), # value + timestamp, # event_ts + created_ts, # created_ts ), ) - conn.execute( - f"""INSERT OR IGNORE INTO {table_name} - (entity_key, feature_name, value, event_ts, created_ts) - VALUES (?, ?, ?, ?, ?)""", - ( - entity_key_bin, - feature_name, - val.SerializeToString(), - timestamp, - created_ts, - ), - ) if progress: progress(1) @@ -197,22 +229,22 @@ def online_read( result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = [] + serialized_entity_keys = [ + serialize_entity_key( + entity_key, + entity_key_serialization_version=config.entity_key_serialization_version, + ) + for entity_key in entity_keys + ] # Fetch all entities in one go cur.execute( f"SELECT entity_key, feature_name, value, event_ts " f"FROM {_table_id(config.project, table)} " f"WHERE entity_key IN ({','.join('?' 
* len(entity_keys))}) " f"ORDER BY entity_key", - [ - serialize_entity_key( - entity_key, - entity_key_serialization_version=config.entity_key_serialization_version, - ) - for entity_key in entity_keys - ], + serialized_entity_keys, ) rows = cur.fetchall() - rows = { k: list(group) for k, group in itertools.groupby(rows, key=lambda r: r[0]) } @@ -290,7 +322,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_featuers: Optional[List[str]], embedding: List[float], top_k: int, distance_metric: Optional[str] = None, @@ -325,10 +358,11 @@ def retrieve_online_documents( # Convert the embedding to a binary format instead of using SerializeToString() query_embedding_bin = serialize_f32(embedding, config.online_store.vector_len) table_name = _table_id(project, table) + vector_field = _get_vector_field(table) cur.execute( f""" - CREATE VIRTUAL TABLE vec_example using vec0( + CREATE VIRTUAL TABLE vec_table using vec0( vector_value float[{config.online_store.vector_len}] ); """ @@ -337,16 +371,17 @@ def retrieve_online_documents( # Currently I can only insert the embedding value without crashing SQLite, will report a bug cur.execute( f""" - INSERT INTO vec_example(rowid, vector_value) + INSERT INTO vec_table(rowid, vector_value) select rowid, vector_value from {table_name} + where feature_name = "{vector_field}" """ ) cur.execute( + f""" + CREATE VIRTUAL TABLE IF NOT EXISTS vec_table using vec0( + vector_value float[{config.online_store.vector_len}] + ); """ - INSERT INTO vec_example(rowid, vector_value) - VALUES (?, ?) - """, - (0, query_embedding_bin), ) # Have to join this with the {table_name} to get the feature name and entity_key @@ -364,7 +399,7 @@ def retrieve_online_documents( rowid, vector_value, distance - from vec_example + from vec_table where vector_value match ? order by distance limit ? 
@@ -392,6 +427,7 @@ def retrieve_online_documents( _build_retrieve_online_document_record( entity_key, string_value if string_value else b"", + # This may be a bug embedding, distance, event_ts, @@ -401,35 +437,251 @@ def retrieve_online_documents( return result + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: List[str], + query: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ]: + """ + Retrieve documents using vector similarity search. + Args: + config: Feast configuration object + table: FeatureView object as the table to search + requested_features: List of requested features to retrieve + query: Query embedding to search for (optional) + top_k: Number of items to return + distance_metric: Distance metric to use (optional) + query_string: The query string to search for using keyword search (bm25) (optional) + Returns: + List of tuples containing the event timestamp, entity key, and feature values + """ + online_store = config.online_store + if not isinstance(online_store, SqliteOnlineStoreConfig): + raise ValueError("online_store must be SqliteOnlineStoreConfig") + if not online_store.vector_enabled and not online_store.text_search_enabled: + raise ValueError( + "You must enable either vector search or text search in the online store config" + ) -def _initialize_conn(db_path: str): - try: - import sqlite_vec # noqa: F401 - except ModuleNotFoundError: - logging.warning("Cannot use sqlite_vec for vector search") + conn = self._get_conn(config) + cur = conn.cursor() + + if online_store.vector_enabled and not online_store.vector_len: + raise ValueError("vector_len is not configured in the online store config") + + table_name = _table_id(config.project, table) + vector_field = _get_vector_field(table) + + if 
online_store.vector_enabled: + query_embedding_bin = serialize_f32(query, online_store.vector_len) # type: ignore + cur.execute( + f""" + CREATE VIRTUAL TABLE IF NOT EXISTS vec_table using vec0( + vector_value float[{online_store.vector_len}] + ); + """ + ) + cur.execute( + f""" + INSERT INTO vec_table (rowid, vector_value) + select rowid, vector_value from {table_name} + where feature_name = "{vector_field}" + """ + ) + elif online_store.text_search_enabled: + string_field_list = [ + f.name for f in table.features if f.dtype == PrimitiveFeastType.STRING + ] + string_fields = ", ".join(string_field_list) + # TODO: swap this for a value configurable in each Field() + BM25_DEFAULT_WEIGHTS = ", ".join( + [ + str(1.0) + for f in table.features + if f.dtype == PrimitiveFeastType.STRING + ] + ) + cur.execute( + f""" + CREATE VIRTUAL TABLE IF NOT EXISTS search_table using fts5( + entity_key, fv_rowid, {string_fields}, tokenize="porter unicode61" + ); + """ + ) + insert_query = _generate_bm25_search_insert_query( + table_name, string_field_list + ) + cur.execute(insert_query) + + else: + raise ValueError( + "Neither vector search nor text search are enabled in the online store config" + ) + + if online_store.vector_enabled: + cur.execute( + f""" + select + fv2.entity_key, + fv2.feature_name, + fv2.value, + fv.vector_value, + f.distance, + fv.event_ts, + fv.created_ts + from ( + select + rowid, + vector_value, + distance + from vec_table + where vector_value match ? + order by distance + limit ? 
+ ) f + left join {table_name} fv + on f.rowid = fv.rowid + left join {table_name} fv2 + on fv.entity_key = fv2.entity_key + where fv2.feature_name != "{vector_field}" + """, + ( + query_embedding_bin, + top_k, + ), + ) + elif online_store.text_search_enabled: + cur.execute( + f""" + select + fv.entity_key, + fv.feature_name, + fv.value, + fv.vector_value, + f.distance, + fv.event_ts, + fv.created_ts + from {table_name} fv + inner join ( + select + fv_rowid, + entity_key, + {string_fields}, + bm25(search_table, {BM25_DEFAULT_WEIGHTS}) as distance + from search_table + where search_table match ? order by distance limit ? + ) f + on f.entity_key = fv.entity_key + """, + (query_string, top_k), + ) + + else: + raise ValueError( + "Neither vector search nor text search are enabled in the online store config" + ) + + rows = cur.fetchall() + results: List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ] = [] + + entity_dict: Dict[ + str, Dict[str, Union[str, ValueProto, EntityKeyProto, datetime]] + ] = {} + for ( + entity_key, + feature_name, + value_bin, + vector_value, + distance, + event_ts, + created_ts, + ) in rows: + entity_key_proto = deserialize_entity_key( + entity_key, + entity_key_serialization_version=config.entity_key_serialization_version, + ) + if entity_key not in entity_dict: + entity_dict[entity_key] = {} + + feature_val = ValueProto() + feature_val.ParseFromString(value_bin) + entity_dict[entity_key]["entity_key_proto"] = entity_key_proto + entity_dict[entity_key][feature_name] = feature_val + if online_store.vector_enabled: + entity_dict[entity_key][vector_field] = _serialize_vector_to_float_list( + vector_value + ) + entity_dict[entity_key]["distance"] = ValueProto(float_val=distance) + entity_dict[entity_key]["event_ts"] = event_ts + entity_dict[entity_key]["created_ts"] = created_ts + + for entity_key_value in entity_dict: + res_event_ts: Optional[datetime] = None + res_entity_key_proto: 
Optional[EntityKeyProto] = None + if isinstance(entity_dict[entity_key_value]["event_ts"], datetime): + res_event_ts = entity_dict[entity_key_value]["event_ts"] # type: ignore[assignment] + + if isinstance( + entity_dict[entity_key_value]["entity_key_proto"], EntityKeyProto + ): + res_entity_key_proto = entity_dict[entity_key_value]["entity_key_proto"] # type: ignore[assignment] + + res_dict: Dict[str, ValueProto] = { + k: v + for k, v in entity_dict[entity_key_value].items() + if isinstance(v, ValueProto) and isinstance(k, str) + } + + results.append( + ( + res_event_ts, + res_entity_key_proto, + res_dict, + ) + ) + return results + + +def _initialize_conn( + db_path: str, enable_sqlite_vec: bool = False +) -> sqlite3.Connection: Path(db_path).parent.mkdir(exist_ok=True) - return sqlite3.connect( + db = sqlite3.connect( db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES, check_same_thread=False, ) + if enable_sqlite_vec: + try: + import sqlite_vec # noqa: F401 + except ModuleNotFoundError: + logging.warning("Cannot use sqlite_vec for vector search") + db.enable_load_extension(True) + sqlite_vec.load(db) -def _table_id(project: str, table: FeatureView) -> str: - return f"{project}_{table.name}" - - -def serialize_f32( - vector: Union[RepeatedScalarFieldContainer[float], List[float]], vector_length: int -) -> bytes: - """serializes a list of floats into a compact "raw bytes" format""" - return struct.pack(f"{vector_length}f", *vector) + return db -def deserialize_f32(byte_vector: bytes, vector_length: int) -> List[float]: - """deserializes a list of floats from a compact "raw bytes" format""" - num_floats = vector_length // 4 # 4 bytes per float - return list(struct.unpack(f"{num_floats}f", byte_vector)) +def _table_id(project: str, table: FeatureView) -> str: + return f"{project}_{table.name}" class SqliteTable(InfraObject): @@ -487,7 +739,17 @@ def update(self): except ModuleNotFoundError: logging.warning("Cannot use sqlite_vec for vector 
search") self.conn.execute( - f"CREATE TABLE IF NOT EXISTS {self.name} (entity_key BLOB, feature_name TEXT, value BLOB, vector_value BLOB, event_ts timestamp, created_ts timestamp, PRIMARY KEY(entity_key, feature_name))" + f""" + CREATE TABLE IF NOT EXISTS {self.name} ( + entity_key BLOB, + feature_name TEXT, + value BLOB, + vector_value BLOB, + event_ts timestamp, + created_ts timestamp, + PRIMARY KEY(entity_key, feature_name) + ) + """ ) self.conn.execute( f"CREATE INDEX IF NOT EXISTS {self.name}_ek ON {self.name} (entity_key);" @@ -495,3 +757,48 @@ def update(self): def teardown(self): self.conn.execute(f"DROP TABLE IF EXISTS {self.name}") + + +def _get_vector_field(table: FeatureView) -> str: + """ + Get the vector field from the feature view. There can be only one. + """ + vector_fields: List[Field] = [ + f for f in table.features if getattr(f, "vector_index", None) + ] + assert len(vector_fields) > 0, ( + f"No vector field found, please update feature view = {table.name} to declare a vector field" + ) + assert len(vector_fields) < 2, ( + "Only one vector field is supported, please update feature view = {table.name} to declare one vector field" + ) + vector_field: str = vector_fields[0].name + return vector_field + + +def _generate_bm25_search_insert_query( + table_name: str, string_field_list: List[str] +) -> str: + """ + Generates an SQL insertion query for the given table and string fields. + + Args: + table_name (str): The name of the table to select data from. + string_field_list (List[str]): The list of string fields to be used in the insertion. + + Returns: + str: The generated SQL insertion query. 
+ """ + _string_fields = ", ".join(string_field_list) + query = f"INSERT INTO search_table (entity_key, fv_rowid, {_string_fields})\nSELECT\n\tDISTINCT fv0.entity_key,\n\tfv0.rowid as fv_rowid" + from_query = f"\nFROM (select rowid, * from {table_name} where feature_name = '{string_field_list[0]}') fv0" + + for i, string_field in enumerate(string_field_list): + query += f"\n\t,fv{i}.value as {string_field}" + if i > 0: + from_query += ( + f"\nLEFT JOIN (select rowid, * from {table_name} where feature_name = '{string_field}') fv{i}" + + f"\n\tON fv0.entity_key = fv{i}.entity_key" + ) + + return query + from_query diff --git a/sdk/python/feast/infra/passthrough_provider.py b/sdk/python/feast/infra/passthrough_provider.py index 215b175eb2e..4e504997d2a 100644 --- a/sdk/python/feast/infra/passthrough_provider.py +++ b/sdk/python/feast/infra/passthrough_provider.py @@ -294,7 +294,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], query: List[float], top_k: int, distance_metric: Optional[str] = None, @@ -305,12 +306,36 @@ def retrieve_online_documents( config, table, requested_feature, + requested_features, query, top_k, distance_metric, ) return result + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: Optional[List[str]], + query: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List: + result = [] + if self.online_store: + result = self.online_store.retrieve_online_documents_v2( + config, + table, + requested_features, + query, + top_k, + distance_metric, + query_string, + ) + return result + @staticmethod def _prep_rows_to_write_for_ingestion( feature_view: Union[BaseFeatureView, FeatureView, OnDemandFeatureView], @@ -424,7 +449,7 @@ def materialize_single_feature_view( def get_historical_features( 
self, config: RepoConfig, - feature_views: List[FeatureView], + feature_views: List[Union[FeatureView, OnDemandFeatureView]], feature_refs: List[str], entity_df: Union[pd.DataFrame, str], registry: BaseRegistry, @@ -471,9 +496,9 @@ def write_feature_service_logs( config: RepoConfig, registry: BaseRegistry, ): - assert ( - feature_service.logging_config is not None - ), "Logging should be configured for the feature service before calling this function" + assert feature_service.logging_config is not None, ( + "Logging should be configured for the feature service before calling this function" + ) self.offline_store.write_logged_features( config=config, @@ -491,9 +516,9 @@ def retrieve_feature_service_logs( config: RepoConfig, registry: BaseRegistry, ) -> RetrievalJob: - assert ( - feature_service.logging_config is not None - ), "Logging should be configured for the feature service before calling this function" + assert feature_service.logging_config is not None, ( + "Logging should be configured for the feature service before calling this function" + ) logging_source = FeatureServiceLoggingSource(feature_service, config.project) schema = logging_source.get_schema(registry) diff --git a/sdk/python/feast/infra/provider.py b/sdk/python/feast/infra/provider.py index 8351f389ad9..18fbd051771 100644 --- a/sdk/python/feast/infra/provider.py +++ b/sdk/python/feast/infra/provider.py @@ -242,7 +242,7 @@ def materialize_single_feature_view( def get_historical_features( self, config: RepoConfig, - feature_views: List[FeatureView], + feature_views: List[Union[FeatureView, OnDemandFeatureView]], feature_refs: List[str], entity_df: Union[pd.DataFrame, str], registry: BaseRegistry, @@ -419,7 +419,8 @@ def retrieve_online_documents( self, config: RepoConfig, table: FeatureView, - requested_feature: str, + requested_feature: Optional[str], + requested_features: Optional[List[str]], query: List[float], top_k: int, distance_metric: Optional[str] = None, @@ -430,7 +431,7 @@ def 
retrieve_online_documents( Optional[ValueProto], Optional[ValueProto], Optional[ValueProto], - ] + ], ]: """ Searches for the top-k most similar documents in the online document store. @@ -440,6 +441,7 @@ def retrieve_online_documents( config: The config for the current feature store. table: The feature view whose embeddings should be searched. requested_feature: the requested document feature name. + requested_features: the requested document feature names. query: The query embedding to search for. top_k: The number of documents to return. @@ -448,6 +450,41 @@ def retrieve_online_documents( """ pass + @abstractmethod + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: List[str], + query: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ]: + """ + Searches for the top-k most similar documents in the online document store. + + Args: + distance_metric: distance metric to use for the search. + config: The config for the current feature store. + table: The feature view whose embeddings should be searched. + requested_features: the requested document feature names. + query: The query embedding to search for (optional). + top_k: The number of documents to return. 
+ query_string: The query string to search for using keyword search (bm25) (optional) + + Returns: + A list of dictionaries, where each dictionary contains the datetime, entitykey, and a dictionary + of feature key value pairs + """ + pass + @abstractmethod def validate_data_source( self, diff --git a/sdk/python/feast/infra/registry/caching_registry.py b/sdk/python/feast/infra/registry/caching_registry.py index 042eee06ab7..23ab80ee1d8 100644 --- a/sdk/python/feast/infra/registry/caching_registry.py +++ b/sdk/python/feast/infra/registry/caching_registry.py @@ -425,12 +425,24 @@ def list_projects( return self._list_projects(tags) def refresh(self, project: Optional[str] = None): - self.cached_registry_proto = self.proto() - self.cached_registry_proto_created = _utc_now() + if self._refresh_lock.locked(): + logger.info("Skipping refresh if already in progress") + return + try: + self.cached_registry_proto = self.proto() + self.cached_registry_proto_created = _utc_now() + except Exception as e: + logger.error(f"Error while refreshing registry: {e}", exc_info=True) def _refresh_cached_registry_if_necessary(self): if self.cache_mode == "sync": - with self._refresh_lock: + # Try acquiring the lock without blocking + if not self._refresh_lock.acquire(blocking=False): + logger.info( + "Skipping refresh if lock is already held by another thread" + ) + return + try: if self.cached_registry_proto == RegistryProto(): # Avoids the need to refresh the registry when cache is not populated yet # Specially during the __init__ phase @@ -454,6 +466,13 @@ def _refresh_cached_registry_if_necessary(self): if expired: logger.info("Registry cache expired, so refreshing") self.refresh() + except Exception as e: + logger.error( + f"Error in _refresh_cached_registry_if_necessary: {e}", + exc_info=True, + ) + finally: + self._refresh_lock.release() # Always release the lock safely def _start_thread_async_refresh(self, cache_ttl_seconds): self.refresh() diff --git 
a/sdk/python/feast/infra/registry/remote.py b/sdk/python/feast/infra/registry/remote.py index 6cc80d5dad1..590c0454b73 100644 --- a/sdk/python/feast/infra/registry/remote.py +++ b/sdk/python/feast/infra/registry/remote.py @@ -1,3 +1,4 @@ +import os from datetime import datetime from pathlib import Path from typing import List, Optional, Union @@ -59,6 +60,12 @@ class RemoteRegistryConfig(RegistryConfig): """ str: Path to the public certificate when the registry server starts in TLS(SSL) mode. This may be needed if the registry server started with a self-signed certificate, typically this file ends with `*.crt`, `*.cer`, or `*.pem`. If registry_type is 'remote', then this configuration is needed to connect to remote registry server in TLS mode. If the remote registry started in non-tls mode then this configuration is not needed.""" + is_tls: bool = False + """ bool: Set to `True` if you plan to connect to a registry server running in TLS (SSL) mode. + If you intend to add the public certificate to the trust store instead of passing it via the `cert` parameter, this field must be set to `True`. + If you are planning to add the public certificate as part of the trust store instead of passing it as a `cert` parameters then setting this field to `true` is mandatory. 
+ """ + class RemoteRegistry(BaseRegistry): def __init__( @@ -70,20 +77,32 @@ def __init__( ): self.auth_config = auth_config assert isinstance(registry_config, RemoteRegistryConfig) - if registry_config.cert: - with open(registry_config.cert, "rb") as cert_file: - trusted_certs = cert_file.read() - tls_credentials = grpc.ssl_channel_credentials( - root_certificates=trusted_certs - ) - self.channel = grpc.secure_channel(registry_config.path, tls_credentials) - else: - self.channel = grpc.insecure_channel(registry_config.path) + self.channel = self._create_grpc_channel(registry_config) auth_header_interceptor = GrpcClientAuthHeaderInterceptor(auth_config) self.channel = grpc.intercept_channel(self.channel, auth_header_interceptor) self.stub = RegistryServer_pb2_grpc.RegistryServerStub(self.channel) + def _create_grpc_channel(self, registry_config): + assert isinstance(registry_config, RemoteRegistryConfig) + if registry_config.cert or registry_config.is_tls: + cafile = os.getenv("SSL_CERT_FILE") or os.getenv("REQUESTS_CA_BUNDLE") + if not cafile and not registry_config.cert: + raise EnvironmentError( + "SSL_CERT_FILE or REQUESTS_CA_BUNDLE environment variable must be set to use secure TLS or set the cert parameter in feature_Store.yaml file under remote registry configuration." 
+ ) + with open( + registry_config.cert if registry_config.cert else cafile, "rb" + ) as cert_file: + trusted_certs = cert_file.read() + tls_credentials = grpc.ssl_channel_credentials( + root_certificates=trusted_certs + ) + return grpc.secure_channel(registry_config.path, tls_credentials) + else: + # Create an insecure gRPC channel + return grpc.insecure_channel(registry_config.path) + def close(self): if self.channel: self.channel.close() diff --git a/sdk/python/feast/infra/utils/aws_utils.py b/sdk/python/feast/infra/utils/aws_utils.py index 0526cf8b65c..39fa815f7e3 100644 --- a/sdk/python/feast/infra/utils/aws_utils.py +++ b/sdk/python/feast/infra/utils/aws_utils.py @@ -1062,7 +1062,7 @@ def upload_arrow_table_to_athena( f"CREATE EXTERNAL TABLE {database}.{table_name} {'IF NOT EXISTS' if not fail_if_exists else ''}" f"({column_query_list}) " f"STORED AS PARQUET " - f"LOCATION '{s3_path[:s3_path.rfind('/')]}' " + f"LOCATION '{s3_path[: s3_path.rfind('/')]}' " f"TBLPROPERTIES('parquet.compress' = 'SNAPPY') " ) diff --git a/sdk/python/feast/infra/utils/couchbase/__init__.py b/sdk/python/feast/infra/utils/couchbase/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/infra/utils/couchbase/couchbase_utils.py b/sdk/python/feast/infra/utils/couchbase/couchbase_utils.py new file mode 100644 index 00000000000..005729274e6 --- /dev/null +++ b/sdk/python/feast/infra/utils/couchbase/couchbase_utils.py @@ -0,0 +1,13 @@ +from datetime import datetime, timezone + + +def normalize_timestamp( + dt: datetime, target_format: str = "%Y-%m-%dT%H:%M:%S%z" +) -> str: + if dt.tzinfo is None: + dt = dt.replace(tzinfo=timezone.utc) # Assume UTC for naive datetimes + # Convert to UTC + utc_dt = dt.astimezone(timezone.utc) + # Format with strftime + formatted = utc_dt.strftime(target_format) + return formatted diff --git a/sdk/python/feast/infra/utils/snowflake/snowflake_utils.py b/sdk/python/feast/infra/utils/snowflake/snowflake_utils.py index 
b9035b40dbf..b9254e72699 100644 --- a/sdk/python/feast/infra/utils/snowflake/snowflake_utils.py +++ b/sdk/python/feast/infra/utils/snowflake/snowflake_utils.py @@ -513,7 +513,7 @@ def chunk_helper(lst: pd.DataFrame, n: int) -> Iterator[Tuple[int, pd.DataFrame] def parse_private_key_path( - private_key_passphrase: str, + private_key_passphrase: Optional[str] = None, key_path: Optional[str] = None, private_key_content: Optional[bytes] = None, ) -> bytes: @@ -521,14 +521,18 @@ def parse_private_key_path( if private_key_content: p_key = serialization.load_pem_private_key( private_key_content, - password=private_key_passphrase.encode(), + password=private_key_passphrase.encode() + if private_key_passphrase is not None + else None, backend=default_backend(), ) elif key_path: with open(key_path, "rb") as key: p_key = serialization.load_pem_private_key( key.read(), - password=private_key_passphrase.encode(), + password=private_key_passphrase.encode() + if private_key_passphrase is not None + else None, backend=default_backend(), ) else: diff --git a/sdk/python/feast/nlp_test_data.py b/sdk/python/feast/nlp_test_data.py new file mode 100644 index 00000000000..5c0a6af4d61 --- /dev/null +++ b/sdk/python/feast/nlp_test_data.py @@ -0,0 +1,67 @@ +from datetime import datetime +from typing import Dict + +import numpy as np +import pandas as pd + + +def create_document_chunks_df( + documents: Dict[str, str], + start_date: datetime, + end_date: datetime, + embedding_size: int = 60, +) -> pd.DataFrame: + """ + Example df generated by this function: + + | event_timestamp | document_id | chunk_id | chunk_text | embedding | created | + |------------------+-------------+----------+------------------+-----------+------------------| + | 2021-03-17 19:31 | doc_1 | chunk-1 | Hello world | [0.1, ...]| 2021-03-24 19:34 | + | 2021-03-17 19:31 | doc_1 | chunk-2 | How are you? 
| [0.2, ...]| 2021-03-24 19:34 | + | 2021-03-17 19:31 | doc_2 | chunk-1 | This is a test | [0.3, ...]| 2021-03-24 19:34 | + | 2021-03-17 19:31 | doc_2 | chunk-2 | Document chunk | [0.4, ...]| 2021-03-24 19:34 | + """ + df_hourly = pd.DataFrame( + { + "event_timestamp": [ + pd.Timestamp(dt, unit="ms").round("ms") + for dt in pd.date_range( + start=start_date, + end=end_date, + freq="1h", + inclusive="left", + tz="UTC", + ) + ] + + [ + pd.Timestamp( + year=2021, month=4, day=12, hour=7, minute=0, second=0, tz="UTC" + ) + ] + } + ) + df_all_chunks = pd.DataFrame() + + for doc_id, doc_text in documents.items(): + chunks = doc_text.split(". ") # Simple chunking by sentence + for chunk_id, chunk_text in enumerate(chunks, start=1): + df_hourly_copy = df_hourly.copy() + df_hourly_copy["document_id"] = doc_id + df_hourly_copy["chunk_id"] = f"chunk-{chunk_id}" + df_hourly_copy["chunk_text"] = chunk_text + df_all_chunks = pd.concat([df_hourly_copy, df_all_chunks]) + + df_all_chunks.reset_index(drop=True, inplace=True) + rows = df_all_chunks["event_timestamp"].count() + + # Generate random embeddings for each chunk + df_all_chunks["embedding"] = [ + np.random.rand(embedding_size).tolist() for _ in range(rows) + ] + df_all_chunks["created"] = pd.to_datetime(pd.Timestamp.now(tz=None).round("ms")) + + # Create duplicate rows that should be filtered by created timestamp + late_row = df_all_chunks[rows // 2 : rows // 2 + 1] + df_all_chunks = pd.concat([df_all_chunks, late_row, late_row], ignore_index=True) + + return df_all_chunks diff --git a/sdk/python/feast/offline_server.py b/sdk/python/feast/offline_server.py index cec043129e7..f3642e5812e 100644 --- a/sdk/python/feast/offline_server.py +++ b/sdk/python/feast/offline_server.py @@ -39,12 +39,21 @@ class OfflineServer(fl.FlightServerBase): - def __init__(self, store: FeatureStore, location: str, **kwargs): + def __init__( + self, + store: FeatureStore, + location: str, + host: str = "localhost", + tls_certificates: List = [], + 
**kwargs, + ): super(OfflineServer, self).__init__( - location, + location=location, middleware=self.arrow_flight_auth_middleware( str_to_auth_manager_type(store.config.auth_config.type) ), + tls_certificates=tls_certificates, + verify_client=False, # this is needed for when we don't need mTLS **kwargs, ) self._location = location @@ -52,6 +61,8 @@ def __init__(self, store: FeatureStore, location: str, **kwargs): self.flights: Dict[str, Any] = {} self.store = store self.offline_store = get_offline_store_from_config(store.config.offline_store) + self.host = host + self.tls_certificates = tls_certificates def arrow_flight_auth_middleware( self, @@ -81,8 +92,13 @@ def descriptor_to_key(self, descriptor: fl.FlightDescriptor): ) def _make_flight_info(self, key: Any, descriptor: fl.FlightDescriptor): - endpoints = [fl.FlightEndpoint(repr(key), [self._location])] - # TODO calculate actual schema from the given features + if len(self.tls_certificates) != 0: + location = fl.Location.for_grpc_tls(self.host, self.port) + else: + location = fl.Location.for_grpc_tcp(self.host, self.port) + endpoints = [ + fl.FlightEndpoint(repr(key), [location]), + ] schema = pa.schema([]) return fl.FlightInfo(schema, descriptor, endpoints, -1, -1) @@ -250,15 +266,15 @@ def do_get(self, context: fl.ServerCallContext, ticket: fl.Ticket): return fl.RecordBatchStream(table) def _validate_offline_write_batch_parameters(self, command: dict): - assert ( - "feature_view_names" in command - ), "feature_view_names is a mandatory parameter" + assert "feature_view_names" in command, ( + "feature_view_names is a mandatory parameter" + ) assert "name_aliases" in command, "name_aliases is a mandatory parameter" feature_view_names = command["feature_view_names"] - assert ( - len(feature_view_names) == 1 - ), "feature_view_names list should only have one item" + assert len(feature_view_names) == 1, ( + "feature_view_names list should only have one item" + ) name_aliases = command["name_aliases"] assert 
len(name_aliases) == 1, "name_aliases list should only have one item" @@ -300,9 +316,9 @@ def write_logged_features(self, command: dict, key: str): command["feature_service_name"] ) - assert ( - feature_service.logging_config is not None - ), "feature service must have logging_config set" + assert feature_service.logging_config is not None, ( + "feature service must have logging_config set" + ) assert_permissions( resource=feature_service, @@ -319,15 +335,15 @@ def write_logged_features(self, command: dict, key: str): ) def _validate_pull_all_from_table_or_query_parameters(self, command: dict): - assert ( - "data_source_name" in command - ), "data_source_name is a mandatory parameter" - assert ( - "join_key_columns" in command - ), "join_key_columns is a mandatory parameter" - assert ( - "feature_name_columns" in command - ), "feature_name_columns is a mandatory parameter" + assert "data_source_name" in command, ( + "data_source_name is a mandatory parameter" + ) + assert "join_key_columns" in command, ( + "join_key_columns is a mandatory parameter" + ) + assert "feature_name_columns" in command, ( + "feature_name_columns is a mandatory parameter" + ) assert "timestamp_field" in command, "timestamp_field is a mandatory parameter" assert "start_date" in command, "start_date is a mandatory parameter" assert "end_date" in command, "end_date is a mandatory parameter" @@ -348,15 +364,15 @@ def pull_all_from_table_or_query(self, command: dict): ) def _validate_pull_latest_from_table_or_query_parameters(self, command: dict): - assert ( - "data_source_name" in command - ), "data_source_name is a mandatory parameter" - assert ( - "join_key_columns" in command - ), "join_key_columns is a mandatory parameter" - assert ( - "feature_name_columns" in command - ), "feature_name_columns is a mandatory parameter" + assert "data_source_name" in command, ( + "data_source_name is a mandatory parameter" + ) + assert "join_key_columns" in command, ( + "join_key_columns is a mandatory 
parameter" + ) + assert "feature_name_columns" in command, ( + "feature_name_columns is a mandatory parameter" + ) assert "timestamp_field" in command, "timestamp_field is a mandatory parameter" assert "start_date" in command, "start_date is a mandatory parameter" assert "end_date" in command, "end_date is a mandatory parameter" @@ -549,11 +565,31 @@ def start_server( store: FeatureStore, host: str, port: int, + tls_key_path: str = "", + tls_cert_path: str = "", ): _init_auth_manager(store) - location = "grpc+tcp://{}:{}".format(host, port) - server = OfflineServer(store, location) + tls_certificates = [] + scheme = "grpc+tcp" + if tls_key_path and tls_cert_path: + logger.info( + "Found SSL certificates in the args so going to start offline server in TLS(SSL) mode." + ) + scheme = "grpc+tls" + with open(tls_cert_path, "rb") as cert_file: + tls_cert_chain = cert_file.read() + with open(tls_key_path, "rb") as key_file: + tls_private_key = key_file.read() + tls_certificates.append((tls_cert_chain, tls_private_key)) + + location = "{}://{}:{}".format(scheme, host, port) + server = OfflineServer( + store, + location=location, + host=host, + tls_certificates=tls_certificates, + ) try: logger.info(f"Offline store server serving at: {location}") server.serve() diff --git a/sdk/python/feast/on_demand_feature_view.py b/sdk/python/feast/on_demand_feature_view.py index 0ae87b5e35a..6397c9fb640 100644 --- a/sdk/python/feast/on_demand_feature_view.py +++ b/sdk/python/feast/on_demand_feature_view.py @@ -1,12 +1,10 @@ import copy import functools -import inspect import warnings from types import FunctionType -from typing import Any, List, Optional, Union, get_type_hints +from typing import Any, List, Optional, Union, cast import dill -import pandas as pd import pyarrow from typeguard import typechecked @@ -31,6 +29,8 @@ from feast.protos.feast.core.Transformation_pb2 import ( UserDefinedFunctionV2 as UserDefinedFunctionProto, ) +from feast.transformation.base import Transformation 
+from feast.transformation.mode import TransformationMode from feast.transformation.pandas_transformation import PandasTransformation from feast.transformation.python_transformation import PythonTransformation from feast.transformation.substrait_transformation import SubstraitTransformation @@ -66,15 +66,15 @@ class OnDemandFeatureView(BaseFeatureView): features: List[Field] source_feature_view_projections: dict[str, FeatureViewProjection] source_request_sources: dict[str, RequestSource] - feature_transformation: Union[ - PandasTransformation, PythonTransformation, SubstraitTransformation - ] + feature_transformation: Transformation mode: str description: str tags: dict[str, str] owner: str write_to_online_store: bool singleton: bool + udf: Optional[FunctionType] + udf_string: Optional[str] def __init__( # noqa: C901 self, @@ -90,10 +90,8 @@ def __init__( # noqa: C901 ] ], udf: Optional[FunctionType] = None, - udf_string: str = "", - feature_transformation: Union[ - PandasTransformation, PythonTransformation, SubstraitTransformation - ], + udf_string: Optional[str] = "", + feature_transformation: Optional[Transformation] = None, mode: str = "pandas", description: str = "", tags: Optional[dict[str, str]] = None, @@ -112,9 +110,9 @@ def __init__( # noqa: C901 sources: A map from input source names to the actual input sources, which may be feature views, or request data sources. These sources serve as inputs to the udf, which will refer to them by name. - udf (deprecated): The user defined transformation function, which must take pandas + udf: The user defined transformation function, which must take pandas dataframes as inputs. - udf_string (deprecated): The source code version of the udf (for diffing and displaying in Web UI) + udf_string: The source code version of the udf (for diffing and displaying in Web UI) feature_transformation: The user defined transformation. 
mode: Mode of execution (e.g., Pandas or Python native) description (optional): A human-readable description. @@ -136,29 +134,10 @@ def __init__( # noqa: C901 schema = schema or [] self.entities = [e.name for e in entities] if entities else [DUMMY_ENTITY_NAME] + self.sources = sources self.mode = mode.lower() - - if self.mode not in {"python", "pandas", "substrait"}: - raise ValueError( - f"Unknown mode {self.mode}. OnDemandFeatureView only supports python or pandas UDFs and substrait." - ) - - if not feature_transformation: - if udf: - warnings.warn( - "udf and udf_string parameters are deprecated. Please use transformation=PandasTransformation(udf, udf_string) instead.", - DeprecationWarning, - ) - # Note inspecting the return signature won't work with isinstance so this is the best alternative - if self.mode == "pandas": - feature_transformation = PandasTransformation(udf, udf_string) - elif self.mode == "python": - feature_transformation = PythonTransformation(udf, udf_string) - else: - raise ValueError( - "OnDemandFeatureView needs to be initialized with either feature_transformation or udf arguments" - ) - + self.udf = udf + self.udf_string = udf_string self.source_feature_view_projections: dict[str, FeatureViewProjection] = {} self.source_request_sources: dict[str, RequestSource] = {} for odfv_source in sources: @@ -206,12 +185,33 @@ def __init__( # noqa: C901 features.append(field) self.features = features - self.feature_transformation = feature_transformation + self.feature_transformation = ( + feature_transformation or self.get_feature_transformation() + ) self.write_to_online_store = write_to_online_store self.singleton = singleton if self.singleton and self.mode != "python": raise ValueError("Singleton is only supported for Python mode.") + def get_feature_transformation(self) -> Transformation: + if not self.udf: + raise ValueError( + "Either udf or feature_transformation must be provided to create an OnDemandFeatureView" + ) + if self.mode in ( + 
TransformationMode.PANDAS, + TransformationMode.PYTHON, + ) or self.mode in ("pandas", "python"): + return Transformation( + mode=self.mode, udf=self.udf, udf_string=self.udf_string or "" + ) + elif self.mode == TransformationMode.SUBSTRAIT or self.mode == "substrait": + return SubstraitTransformation.from_ibis(self.udf, self.sources) + else: + raise ValueError( + f"Unsupported transformation mode: {self.mode} for OnDemandFeatureView" + ) + @property def proto_class(self) -> type[OnDemandFeatureViewProto]: return OnDemandFeatureViewProto @@ -312,16 +312,25 @@ def to_proto(self) -> OnDemandFeatureViewProto: request_data_source=request_sources.to_proto() ) - feature_transformation = FeatureTransformationProto( - user_defined_function=self.feature_transformation.to_proto() + user_defined_function_proto = cast( + UserDefinedFunctionProto, + self.feature_transformation.to_proto() if isinstance( self.feature_transformation, (PandasTransformation, PythonTransformation), ) else None, - substrait_transformation=self.feature_transformation.to_proto() + ) + + substrait_transformation_proto = ( + self.feature_transformation.to_proto() if isinstance(self.feature_transformation, SubstraitTransformation) - else None, + else None + ) + + feature_transformation = FeatureTransformationProto( + user_defined_function=user_defined_function_proto, + substrait_transformation=substrait_transformation_proto, ) spec = OnDemandFeatureViewSpec( name=self.name, @@ -339,7 +348,6 @@ def to_proto(self) -> OnDemandFeatureViewProto: write_to_online_store=self.write_to_online_store, singleton=self.singleton if self.singleton else False, ) - return OnDemandFeatureViewProto(spec=spec, meta=meta) @classmethod @@ -454,6 +462,8 @@ def from_proto( Field( name=feature.name, dtype=from_value_type(ValueType(feature.value_type)), + vector_index=feature.vector_index, + vector_search_metric=feature.vector_search_metric, ) for feature in on_demand_feature_view_proto.spec.features ], @@ -640,13 +650,25 @@ def 
transform_dict( def infer_features(self) -> None: random_input = self._construct_random_input(singleton=self.singleton) - inferred_features = self.feature_transformation.infer_features(random_input) + inferred_features = self.feature_transformation.infer_features( + random_input=random_input, singleton=self.singleton + ) if self.features: missing_features = [] for specified_feature in self.features: - if specified_feature not in inferred_features: + if ( + specified_feature not in inferred_features + and "Array" not in specified_feature.dtype.__str__() + ): missing_features.append(specified_feature) + elif "Array" in specified_feature.dtype.__str__(): + if specified_feature.name not in [ + f.name for f in inferred_features + ]: + missing_features.append(specified_feature) + else: + pass if missing_features: raise SpecifiedFeaturesNotPresentError( missing_features, inferred_features, self.name @@ -722,6 +744,7 @@ def get_requested_odfvs( def on_demand_feature_view( *, + name: Optional[str] = None, entities: Optional[List[Entity]] = None, schema: list[Field], sources: list[ @@ -737,11 +760,13 @@ def on_demand_feature_view( owner: str = "", write_to_online_store: bool = False, singleton: bool = False, + explode: bool = False, ): """ Creates an OnDemandFeatureView object with the given user function as udf. Args: + name (optional): The name of the on demand feature view. If not provided, the name will be the name of the user function. entities (Optional): The list of names of entities that this feature view is associated with. schema: The list of features in the output of the on demand feature view, after the transformation has been applied. @@ -757,6 +782,7 @@ def on_demand_feature_view( the online store for faster retrieval. singleton (optional): A boolean that indicates whether the transformation is executed on a singleton (only applicable when mode="python"). 
+ explode (optional): A boolean that indicates whether the transformation explodes the input data into multiple rows. """ def mainify(obj) -> None: @@ -766,35 +792,13 @@ def mainify(obj) -> None: obj.__module__ = "__main__" def decorator(user_function): - return_annotation = get_type_hints(user_function).get("return", inspect._empty) udf_string = dill.source.getsource(user_function) mainify(user_function) - if mode == "pandas": - if return_annotation not in (inspect._empty, pd.DataFrame): - raise TypeError( - f"return signature for {user_function} is {return_annotation} but should be pd.DataFrame" - ) - transformation = PandasTransformation(user_function, udf_string) - elif mode == "python": - if return_annotation not in (inspect._empty, dict[str, Any]): - raise TypeError( - f"return signature for {user_function} is {return_annotation} but should be dict[str, Any]" - ) - transformation = PythonTransformation(user_function, udf_string) - elif mode == "substrait": - from ibis.expr.types.relations import Table - - if return_annotation not in (inspect._empty, Table): - raise TypeError( - f"return signature for {user_function} is {return_annotation} but should be ibis.expr.types.relations.Table" - ) - transformation = SubstraitTransformation.from_ibis(user_function, sources) on_demand_feature_view_obj = OnDemandFeatureView( - name=user_function.__name__, + name=name if name is not None else user_function.__name__, sources=sources, schema=schema, - feature_transformation=transformation, mode=mode, description=description, tags=tags, @@ -802,6 +806,8 @@ def decorator(user_function): write_to_online_store=write_to_online_store, entities=entities, singleton=singleton, + udf=user_function, + udf_string=udf_string, ) functools.update_wrapper( wrapper=on_demand_feature_view_obj, wrapped=user_function diff --git a/sdk/python/feast/protos/feast/core/DataSource_pb2.py b/sdk/python/feast/protos/feast/core/DataSource_pb2.py index b58c33a3830..68bee8d7609 100644 --- 
a/sdk/python/feast/protos/feast/core/DataSource_pb2.py +++ b/sdk/python/feast/protos/feast/core/DataSource_pb2.py @@ -19,7 +19,7 @@ from feast.protos.feast.core import Feature_pb2 as feast_dot_core_dot_Feature__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66\x65\x61st/core/DataSource.proto\x12\nfeast.core\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1b\x66\x65\x61st/core/DataFormat.proto\x1a\x17\x66\x65\x61st/types/Value.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\"\xc0\x16\n\nDataSource\x12\x0c\n\x04name\x18\x14 \x01(\t\x12\x0f\n\x07project\x18\x15 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x17 \x01(\t\x12.\n\x04tags\x18\x18 \x03(\x0b\x32 .feast.core.DataSource.TagsEntry\x12\r\n\x05owner\x18\x19 \x01(\t\x12/\n\x04type\x18\x01 \x01(\x0e\x32!.feast.core.DataSource.SourceType\x12?\n\rfield_mapping\x18\x02 \x03(\x0b\x32(.feast.core.DataSource.FieldMappingEntry\x12\x17\n\x0ftimestamp_field\x18\x03 \x01(\t\x12\x1d\n\x15\x64\x61te_partition_column\x18\x04 \x01(\t\x12 \n\x18\x63reated_timestamp_column\x18\x05 \x01(\t\x12\x1e\n\x16\x64\x61ta_source_class_type\x18\x11 \x01(\t\x12,\n\x0c\x62\x61tch_source\x18\x1a \x01(\x0b\x32\x16.feast.core.DataSource\x12/\n\x04meta\x18\x32 \x01(\x0b\x32!.feast.core.DataSource.SourceMeta\x12:\n\x0c\x66ile_options\x18\x0b \x01(\x0b\x32\".feast.core.DataSource.FileOptionsH\x00\x12\x42\n\x10\x62igquery_options\x18\x0c \x01(\x0b\x32&.feast.core.DataSource.BigQueryOptionsH\x00\x12<\n\rkafka_options\x18\r \x01(\x0b\x32#.feast.core.DataSource.KafkaOptionsH\x00\x12@\n\x0fkinesis_options\x18\x0e \x01(\x0b\x32%.feast.core.DataSource.KinesisOptionsH\x00\x12\x42\n\x10redshift_options\x18\x0f \x01(\x0b\x32&.feast.core.DataSource.RedshiftOptionsH\x00\x12I\n\x14request_data_options\x18\x12 \x01(\x0b\x32).feast.core.DataSource.RequestDataOptionsH\x00\x12\x44\n\x0e\x63ustom_options\x18\x10 \x01(\x0b\x32*.feast.core.DataSource.CustomSourceOptionsH\x00\x12\x44\n\x11snowflake_options\x18\x13 
\x01(\x0b\x32\'.feast.core.DataSource.SnowflakeOptionsH\x00\x12:\n\x0cpush_options\x18\x16 \x01(\x0b\x32\".feast.core.DataSource.PushOptionsH\x00\x12<\n\rspark_options\x18\x1b \x01(\x0b\x32#.feast.core.DataSource.SparkOptionsH\x00\x12<\n\rtrino_options\x18\x1e \x01(\x0b\x32#.feast.core.DataSource.TrinoOptionsH\x00\x12>\n\x0e\x61thena_options\x18# \x01(\x0b\x32$.feast.core.DataSource.AthenaOptionsH\x00\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x33\n\x11\x46ieldMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x82\x01\n\nSourceMeta\x12:\n\x16\x65\x61rliestEventTimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14latestEventTimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x65\n\x0b\x46ileOptions\x12+\n\x0b\x66ile_format\x18\x01 \x01(\x0b\x32\x16.feast.core.FileFormat\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x1c\n\x14s3_endpoint_override\x18\x03 \x01(\t\x1a/\n\x0f\x42igQueryOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x1a,\n\x0cTrinoOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x1a\xae\x01\n\x0cKafkaOptions\x12\x1f\n\x17kafka_bootstrap_servers\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x30\n\x0emessage_format\x18\x03 \x01(\x0b\x32\x18.feast.core.StreamFormat\x12<\n\x19watermark_delay_threshold\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a\x66\n\x0eKinesisOptions\x12\x0e\n\x06region\x18\x01 \x01(\t\x12\x13\n\x0bstream_name\x18\x02 \x01(\t\x12/\n\rrecord_format\x18\x03 \x01(\x0b\x32\x18.feast.core.StreamFormat\x1aQ\n\x0fRedshiftOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x04 \x01(\t\x1aT\n\rAthenaOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x61ta_source\x18\x04 
\x01(\t\x1aX\n\x10SnowflakeOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x04 \x01(\tJ\x04\x08\x05\x10\x06\x1aO\n\x0cSparkOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x04 \x01(\t\x1a,\n\x13\x43ustomSourceOptions\x12\x15\n\rconfiguration\x18\x01 \x01(\x0c\x1a\xf7\x01\n\x12RequestDataOptions\x12Z\n\x11\x64\x65precated_schema\x18\x02 \x03(\x0b\x32?.feast.core.DataSource.RequestDataOptions.DeprecatedSchemaEntry\x12)\n\x06schema\x18\x03 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x1aT\n\x15\x44\x65precatedSchemaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum:\x02\x38\x01J\x04\x08\x01\x10\x02\x1a\x13\n\x0bPushOptionsJ\x04\x08\x01\x10\x02\"\xf8\x01\n\nSourceType\x12\x0b\n\x07INVALID\x10\x00\x12\x0e\n\nBATCH_FILE\x10\x01\x12\x13\n\x0f\x42\x41TCH_SNOWFLAKE\x10\x08\x12\x12\n\x0e\x42\x41TCH_BIGQUERY\x10\x02\x12\x12\n\x0e\x42\x41TCH_REDSHIFT\x10\x05\x12\x10\n\x0cSTREAM_KAFKA\x10\x03\x12\x12\n\x0eSTREAM_KINESIS\x10\x04\x12\x11\n\rCUSTOM_SOURCE\x10\x06\x12\x12\n\x0eREQUEST_SOURCE\x10\x07\x12\x0f\n\x0bPUSH_SOURCE\x10\t\x12\x0f\n\x0b\x42\x41TCH_TRINO\x10\n\x12\x0f\n\x0b\x42\x41TCH_SPARK\x10\x0b\x12\x10\n\x0c\x42\x41TCH_ATHENA\x10\x0c\x42\t\n\x07optionsJ\x04\x08\x06\x10\x0b\x42T\n\x10\x66\x65\x61st.proto.coreB\x0f\x44\x61taSourceProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66\x65\x61st/core/DataSource.proto\x12\nfeast.core\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1b\x66\x65\x61st/core/DataFormat.proto\x1a\x17\x66\x65\x61st/types/Value.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\"\xc0\x16\n\nDataSource\x12\x0c\n\x04name\x18\x14 \x01(\t\x12\x0f\n\x07project\x18\x15 
\x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x17 \x01(\t\x12.\n\x04tags\x18\x18 \x03(\x0b\x32 .feast.core.DataSource.TagsEntry\x12\r\n\x05owner\x18\x19 \x01(\t\x12/\n\x04type\x18\x01 \x01(\x0e\x32!.feast.core.DataSource.SourceType\x12?\n\rfield_mapping\x18\x02 \x03(\x0b\x32(.feast.core.DataSource.FieldMappingEntry\x12\x17\n\x0ftimestamp_field\x18\x03 \x01(\t\x12\x1d\n\x15\x64\x61te_partition_column\x18\x04 \x01(\t\x12 \n\x18\x63reated_timestamp_column\x18\x05 \x01(\t\x12\x1e\n\x16\x64\x61ta_source_class_type\x18\x11 \x01(\t\x12,\n\x0c\x62\x61tch_source\x18\x1a \x01(\x0b\x32\x16.feast.core.DataSource\x12/\n\x04meta\x18\x32 \x01(\x0b\x32!.feast.core.DataSource.SourceMeta\x12:\n\x0c\x66ile_options\x18\x0b \x01(\x0b\x32\".feast.core.DataSource.FileOptionsH\x00\x12\x42\n\x10\x62igquery_options\x18\x0c \x01(\x0b\x32&.feast.core.DataSource.BigQueryOptionsH\x00\x12<\n\rkafka_options\x18\r \x01(\x0b\x32#.feast.core.DataSource.KafkaOptionsH\x00\x12@\n\x0fkinesis_options\x18\x0e \x01(\x0b\x32%.feast.core.DataSource.KinesisOptionsH\x00\x12\x42\n\x10redshift_options\x18\x0f \x01(\x0b\x32&.feast.core.DataSource.RedshiftOptionsH\x00\x12I\n\x14request_data_options\x18\x12 \x01(\x0b\x32).feast.core.DataSource.RequestDataOptionsH\x00\x12\x44\n\x0e\x63ustom_options\x18\x10 \x01(\x0b\x32*.feast.core.DataSource.CustomSourceOptionsH\x00\x12\x44\n\x11snowflake_options\x18\x13 \x01(\x0b\x32\'.feast.core.DataSource.SnowflakeOptionsH\x00\x12:\n\x0cpush_options\x18\x16 \x01(\x0b\x32\".feast.core.DataSource.PushOptionsH\x00\x12<\n\rspark_options\x18\x1b \x01(\x0b\x32#.feast.core.DataSource.SparkOptionsH\x00\x12<\n\rtrino_options\x18\x1e \x01(\x0b\x32#.feast.core.DataSource.TrinoOptionsH\x00\x12>\n\x0e\x61thena_options\x18# \x01(\x0b\x32$.feast.core.DataSource.AthenaOptionsH\x00\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x33\n\x11\x46ieldMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a\x82\x01\n\nSourceMeta\x12:\n\x16\x65\x61rliestEventTimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14latestEventTimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x65\n\x0b\x46ileOptions\x12+\n\x0b\x66ile_format\x18\x01 \x01(\x0b\x32\x16.feast.core.FileFormat\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x1c\n\x14s3_endpoint_override\x18\x03 \x01(\t\x1a/\n\x0f\x42igQueryOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x1a,\n\x0cTrinoOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x1a\xae\x01\n\x0cKafkaOptions\x12\x1f\n\x17kafka_bootstrap_servers\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x30\n\x0emessage_format\x18\x03 \x01(\x0b\x32\x18.feast.core.StreamFormat\x12<\n\x19watermark_delay_threshold\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a\x66\n\x0eKinesisOptions\x12\x0e\n\x06region\x18\x01 \x01(\t\x12\x13\n\x0bstream_name\x18\x02 \x01(\t\x12/\n\rrecord_format\x18\x03 \x01(\x0b\x32\x18.feast.core.StreamFormat\x1aQ\n\x0fRedshiftOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x04 \x01(\t\x1aT\n\rAthenaOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x61ta_source\x18\x04 \x01(\t\x1aX\n\x10SnowflakeOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x04 \x01(\tJ\x04\x08\x05\x10\x06\x1aO\n\x0cSparkOptions\x12\r\n\x05table\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x04 \x01(\t\x1a,\n\x13\x43ustomSourceOptions\x12\x15\n\rconfiguration\x18\x01 \x01(\x0c\x1a\xf7\x01\n\x12RequestDataOptions\x12Z\n\x11\x64\x65precated_schema\x18\x02 
\x03(\x0b\x32?.feast.core.DataSource.RequestDataOptions.DeprecatedSchemaEntry\x12)\n\x06schema\x18\x03 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x1aT\n\x15\x44\x65precatedSchemaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum:\x02\x38\x01J\x04\x08\x01\x10\x02\x1a\x13\n\x0bPushOptionsJ\x04\x08\x01\x10\x02\"\xf8\x01\n\nSourceType\x12\x0b\n\x07INVALID\x10\x00\x12\x0e\n\nBATCH_FILE\x10\x01\x12\x13\n\x0f\x42\x41TCH_SNOWFLAKE\x10\x08\x12\x12\n\x0e\x42\x41TCH_BIGQUERY\x10\x02\x12\x12\n\x0e\x42\x41TCH_REDSHIFT\x10\x05\x12\x10\n\x0cSTREAM_KAFKA\x10\x03\x12\x12\n\x0eSTREAM_KINESIS\x10\x04\x12\x11\n\rCUSTOM_SOURCE\x10\x06\x12\x12\n\x0eREQUEST_SOURCE\x10\x07\x12\x0f\n\x0bPUSH_SOURCE\x10\t\x12\x0f\n\x0b\x42\x41TCH_TRINO\x10\n\x12\x0f\n\x0b\x42\x41TCH_SPARK\x10\x0b\x12\x10\n\x0c\x42\x41TCH_ATHENA\x10\x0c\x42\t\n\x07optionsJ\x04\x08\x06\x10\x0b\"=\n\x0e\x44\x61taSourceList\x12+\n\x0b\x64\x61tasources\x18\x01 \x03(\x0b\x32\x16.feast.core.DataSourceBT\n\x10\x66\x65\x61st.proto.coreB\x0f\x44\x61taSourceProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -69,4 +69,6 @@ _globals['_DATASOURCE_PUSHOPTIONS']._serialized_end=2801 _globals['_DATASOURCE_SOURCETYPE']._serialized_start=2804 _globals['_DATASOURCE_SOURCETYPE']._serialized_end=3052 + _globals['_DATASOURCELIST']._serialized_start=3071 + _globals['_DATASOURCELIST']._serialized_end=3132 # @@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/DataSource_pb2.pyi b/sdk/python/feast/protos/feast/core/DataSource_pb2.pyi index 94336638e19..aadec3fad4c 100644 --- a/sdk/python/feast/protos/feast/core/DataSource_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/DataSource_pb2.pyi @@ -557,3 +557,18 @@ class DataSource(google.protobuf.message.Message): def WhichOneof(self, oneof_group: typing_extensions.Literal["options", b"options"]) -> 
typing_extensions.Literal["file_options", "bigquery_options", "kafka_options", "kinesis_options", "redshift_options", "request_data_options", "custom_options", "snowflake_options", "push_options", "spark_options", "trino_options", "athena_options"] | None: ... global___DataSource = DataSource + +class DataSourceList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + DATASOURCES_FIELD_NUMBER: builtins.int + @property + def datasources(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___DataSource]: ... + def __init__( + self, + *, + datasources: collections.abc.Iterable[global___DataSource] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["datasources", b"datasources"]) -> None: ... + +global___DataSourceList = DataSourceList diff --git a/sdk/python/feast/protos/feast/core/Entity_pb2.py b/sdk/python/feast/protos/feast/core/Entity_pb2.py index 5a192854cab..2b3e7806736 100644 --- a/sdk/python/feast/protos/feast/core/Entity_pb2.py +++ b/sdk/python/feast/protos/feast/core/Entity_pb2.py @@ -16,7 +16,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66\x65\x61st/core/Entity.proto\x12\nfeast.core\x1a\x17\x66\x65\x61st/types/Value.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"V\n\x06\x45ntity\x12&\n\x04spec\x18\x01 \x01(\x0b\x32\x18.feast.core.EntitySpecV2\x12$\n\x04meta\x18\x02 \x01(\x0b\x32\x16.feast.core.EntityMeta\"\xf3\x01\n\x0c\x45ntitySpecV2\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\t \x01(\t\x12/\n\nvalue_type\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x10\n\x08join_key\x18\x04 \x01(\t\x12\x30\n\x04tags\x18\x08 \x03(\x0b\x32\".feast.core.EntitySpecV2.TagsEntry\x12\r\n\x05owner\x18\n \x01(\t\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\x7f\n\nEntityMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampBP\n\x10\x66\x65\x61st.proto.coreB\x0b\x45ntityProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66\x65\x61st/core/Entity.proto\x12\nfeast.core\x1a\x17\x66\x65\x61st/types/Value.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"V\n\x06\x45ntity\x12&\n\x04spec\x18\x01 \x01(\x0b\x32\x18.feast.core.EntitySpecV2\x12$\n\x04meta\x18\x02 \x01(\x0b\x32\x16.feast.core.EntityMeta\"\xf3\x01\n\x0c\x45ntitySpecV2\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\t \x01(\t\x12/\n\nvalue_type\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x10\n\x08join_key\x18\x04 \x01(\t\x12\x30\n\x04tags\x18\x08 \x03(\x0b\x32\".feast.core.EntitySpecV2.TagsEntry\x12\r\n\x05owner\x18\n \x01(\t\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x7f\n\nEntityMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\nEntityList\x12$\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x12.feast.core.EntityBP\n\x10\x66\x65\x61st.proto.coreB\x0b\x45ntityProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,4 +34,6 @@ _globals['_ENTITYSPECV2_TAGSENTRY']._serialized_end=429 _globals['_ENTITYMETA']._serialized_start=431 _globals['_ENTITYMETA']._serialized_end=558 + _globals['_ENTITYLIST']._serialized_start=560 + _globals['_ENTITYLIST']._serialized_end=610 # @@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/Entity_pb2.pyi 
b/sdk/python/feast/protos/feast/core/Entity_pb2.pyi index 732b3e10326..025817edfee 100644 --- a/sdk/python/feast/protos/feast/core/Entity_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/Entity_pb2.pyi @@ -128,3 +128,18 @@ class EntityMeta(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["created_timestamp", b"created_timestamp", "last_updated_timestamp", b"last_updated_timestamp"]) -> None: ... global___EntityMeta = EntityMeta + +class EntityList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + ENTITIES_FIELD_NUMBER: builtins.int + @property + def entities(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Entity]: ... + def __init__( + self, + *, + entities: collections.abc.Iterable[global___Entity] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["entities", b"entities"]) -> None: ... + +global___EntityList = EntityList diff --git a/sdk/python/feast/protos/feast/core/FeatureService_pb2.py b/sdk/python/feast/protos/feast/core/FeatureService_pb2.py index cf6ac46ac54..7ef36079691 100644 --- a/sdk/python/feast/protos/feast/core/FeatureService_pb2.py +++ b/sdk/python/feast/protos/feast/core/FeatureService_pb2.py @@ -16,7 +16,7 @@ from feast.protos.feast.core import FeatureViewProjection_pb2 as feast_dot_core_dot_FeatureViewProjection__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x66\x65\x61st/core/FeatureService.proto\x12\nfeast.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a&feast/core/FeatureViewProjection.proto\"l\n\x0e\x46\x65\x61tureService\x12,\n\x04spec\x18\x01 \x01(\x0b\x32\x1e.feast.core.FeatureServiceSpec\x12,\n\x04meta\x18\x02 \x01(\x0b\x32\x1e.feast.core.FeatureServiceMeta\"\xa4\x02\n\x12\x46\x65\x61tureServiceSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x33\n\x08\x66\x65\x61tures\x18\x03 
\x03(\x0b\x32!.feast.core.FeatureViewProjection\x12\x36\n\x04tags\x18\x04 \x03(\x0b\x32(.feast.core.FeatureServiceSpec.TagsEntry\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x31\n\x0elogging_config\x18\x07 \x01(\x0b\x32\x19.feast.core.LoggingConfig\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x87\x01\n\x12\x46\x65\x61tureServiceMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x9a\x07\n\rLoggingConfig\x12\x13\n\x0bsample_rate\x18\x01 \x01(\x02\x12\x45\n\x10\x66ile_destination\x18\x03 \x01(\x0b\x32).feast.core.LoggingConfig.FileDestinationH\x00\x12M\n\x14\x62igquery_destination\x18\x04 \x01(\x0b\x32-.feast.core.LoggingConfig.BigQueryDestinationH\x00\x12M\n\x14redshift_destination\x18\x05 \x01(\x0b\x32-.feast.core.LoggingConfig.RedshiftDestinationH\x00\x12O\n\x15snowflake_destination\x18\x06 \x01(\x0b\x32..feast.core.LoggingConfig.SnowflakeDestinationH\x00\x12I\n\x12\x63ustom_destination\x18\x07 \x01(\x0b\x32+.feast.core.LoggingConfig.CustomDestinationH\x00\x12I\n\x12\x61thena_destination\x18\x08 \x01(\x0b\x32+.feast.core.LoggingConfig.AthenaDestinationH\x00\x1aS\n\x0f\x46ileDestination\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x1c\n\x14s3_endpoint_override\x18\x02 \x01(\t\x12\x14\n\x0cpartition_by\x18\x03 \x03(\t\x1a(\n\x13\x42igQueryDestination\x12\x11\n\ttable_ref\x18\x01 \x01(\t\x1a)\n\x13RedshiftDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a\'\n\x11\x41thenaDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a*\n\x14SnowflakeDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a\x99\x01\n\x11\x43ustomDestination\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12G\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x37.feast.core.LoggingConfig.CustomDestination.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stinationBX\n\x10\x66\x65\x61st.proto.coreB\x13\x46\x65\x61tureServiceProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x66\x65\x61st/core/FeatureService.proto\x12\nfeast.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a&feast/core/FeatureViewProjection.proto\"l\n\x0e\x46\x65\x61tureService\x12,\n\x04spec\x18\x01 \x01(\x0b\x32\x1e.feast.core.FeatureServiceSpec\x12,\n\x04meta\x18\x02 \x01(\x0b\x32\x1e.feast.core.FeatureServiceMeta\"\xa4\x02\n\x12\x46\x65\x61tureServiceSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x33\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32!.feast.core.FeatureViewProjection\x12\x36\n\x04tags\x18\x04 \x03(\x0b\x32(.feast.core.FeatureServiceSpec.TagsEntry\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x31\n\x0elogging_config\x18\x07 \x01(\x0b\x32\x19.feast.core.LoggingConfig\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x87\x01\n\x12\x46\x65\x61tureServiceMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd1\x08\n\rLoggingConfig\x12\x13\n\x0bsample_rate\x18\x01 \x01(\x02\x12\x45\n\x10\x66ile_destination\x18\x03 \x01(\x0b\x32).feast.core.LoggingConfig.FileDestinationH\x00\x12M\n\x14\x62igquery_destination\x18\x04 \x01(\x0b\x32-.feast.core.LoggingConfig.BigQueryDestinationH\x00\x12M\n\x14redshift_destination\x18\x05 \x01(\x0b\x32-.feast.core.LoggingConfig.RedshiftDestinationH\x00\x12O\n\x15snowflake_destination\x18\x06 \x01(\x0b\x32..feast.core.LoggingConfig.SnowflakeDestinationH\x00\x12I\n\x12\x63ustom_destination\x18\x07 \x01(\x0b\x32+.feast.core.LoggingConfig.CustomDestinationH\x00\x12I\n\x12\x61thena_destination\x18\x08 
\x01(\x0b\x32+.feast.core.LoggingConfig.AthenaDestinationH\x00\x12`\n\x1e\x63ouchbase_columnar_destination\x18\t \x01(\x0b\x32\x36.feast.core.LoggingConfig.CouchbaseColumnarDestinationH\x00\x1aS\n\x0f\x46ileDestination\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x1c\n\x14s3_endpoint_override\x18\x02 \x01(\t\x12\x14\n\x0cpartition_by\x18\x03 \x03(\t\x1a(\n\x13\x42igQueryDestination\x12\x11\n\ttable_ref\x18\x01 \x01(\t\x1a)\n\x13RedshiftDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a\'\n\x11\x41thenaDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a*\n\x14SnowflakeDestination\x12\x12\n\ntable_name\x18\x01 \x01(\t\x1a\x99\x01\n\x11\x43ustomDestination\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12G\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x37.feast.core.LoggingConfig.CustomDestination.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1aS\n\x1c\x43ouchbaseColumnarDestination\x12\x10\n\x08\x64\x61tabase\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x12\n\ncollection\x18\x03 \x01(\tB\r\n\x0b\x64\x65stination\"I\n\x12\x46\x65\x61tureServiceList\x12\x33\n\x0f\x66\x65\x61tureservices\x18\x01 \x03(\x0b\x32\x1a.feast.core.FeatureServiceBX\n\x10\x66\x65\x61st.proto.coreB\x13\x46\x65\x61tureServiceProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -37,19 +37,23 @@ _globals['_FEATURESERVICEMETA']._serialized_start=526 _globals['_FEATURESERVICEMETA']._serialized_end=661 _globals['_LOGGINGCONFIG']._serialized_start=664 - _globals['_LOGGINGCONFIG']._serialized_end=1586 - _globals['_LOGGINGCONFIG_FILEDESTINATION']._serialized_start=1162 - _globals['_LOGGINGCONFIG_FILEDESTINATION']._serialized_end=1245 - _globals['_LOGGINGCONFIG_BIGQUERYDESTINATION']._serialized_start=1247 - _globals['_LOGGINGCONFIG_BIGQUERYDESTINATION']._serialized_end=1287 - _globals['_LOGGINGCONFIG_REDSHIFTDESTINATION']._serialized_start=1289 - 
_globals['_LOGGINGCONFIG_REDSHIFTDESTINATION']._serialized_end=1330 - _globals['_LOGGINGCONFIG_ATHENADESTINATION']._serialized_start=1332 - _globals['_LOGGINGCONFIG_ATHENADESTINATION']._serialized_end=1371 - _globals['_LOGGINGCONFIG_SNOWFLAKEDESTINATION']._serialized_start=1373 - _globals['_LOGGINGCONFIG_SNOWFLAKEDESTINATION']._serialized_end=1415 - _globals['_LOGGINGCONFIG_CUSTOMDESTINATION']._serialized_start=1418 - _globals['_LOGGINGCONFIG_CUSTOMDESTINATION']._serialized_end=1571 - _globals['_LOGGINGCONFIG_CUSTOMDESTINATION_CONFIGENTRY']._serialized_start=1526 - _globals['_LOGGINGCONFIG_CUSTOMDESTINATION_CONFIGENTRY']._serialized_end=1571 + _globals['_LOGGINGCONFIG']._serialized_end=1769 + _globals['_LOGGINGCONFIG_FILEDESTINATION']._serialized_start=1260 + _globals['_LOGGINGCONFIG_FILEDESTINATION']._serialized_end=1343 + _globals['_LOGGINGCONFIG_BIGQUERYDESTINATION']._serialized_start=1345 + _globals['_LOGGINGCONFIG_BIGQUERYDESTINATION']._serialized_end=1385 + _globals['_LOGGINGCONFIG_REDSHIFTDESTINATION']._serialized_start=1387 + _globals['_LOGGINGCONFIG_REDSHIFTDESTINATION']._serialized_end=1428 + _globals['_LOGGINGCONFIG_ATHENADESTINATION']._serialized_start=1430 + _globals['_LOGGINGCONFIG_ATHENADESTINATION']._serialized_end=1469 + _globals['_LOGGINGCONFIG_SNOWFLAKEDESTINATION']._serialized_start=1471 + _globals['_LOGGINGCONFIG_SNOWFLAKEDESTINATION']._serialized_end=1513 + _globals['_LOGGINGCONFIG_CUSTOMDESTINATION']._serialized_start=1516 + _globals['_LOGGINGCONFIG_CUSTOMDESTINATION']._serialized_end=1669 + _globals['_LOGGINGCONFIG_CUSTOMDESTINATION_CONFIGENTRY']._serialized_start=1624 + _globals['_LOGGINGCONFIG_CUSTOMDESTINATION_CONFIGENTRY']._serialized_end=1669 + _globals['_LOGGINGCONFIG_COUCHBASECOLUMNARDESTINATION']._serialized_start=1671 + _globals['_LOGGINGCONFIG_COUCHBASECOLUMNARDESTINATION']._serialized_end=1754 + _globals['_FEATURESERVICELIST']._serialized_start=1771 + _globals['_FEATURESERVICELIST']._serialized_end=1844 # 
@@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/FeatureService_pb2.pyi b/sdk/python/feast/protos/feast/core/FeatureService_pb2.pyi index b3305b72df9..6d5879e52cb 100644 --- a/sdk/python/feast/protos/feast/core/FeatureService_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/FeatureService_pb2.pyi @@ -228,6 +228,27 @@ class LoggingConfig(google.protobuf.message.Message): ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["config", b"config", "kind", b"kind"]) -> None: ... + class CouchbaseColumnarDestination(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + DATABASE_FIELD_NUMBER: builtins.int + SCOPE_FIELD_NUMBER: builtins.int + COLLECTION_FIELD_NUMBER: builtins.int + database: builtins.str + """Destination database name""" + scope: builtins.str + """Destination scope name""" + collection: builtins.str + """Destination collection name""" + def __init__( + self, + *, + database: builtins.str = ..., + scope: builtins.str = ..., + collection: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["collection", b"collection", "database", b"database", "scope", b"scope"]) -> None: ... + SAMPLE_RATE_FIELD_NUMBER: builtins.int FILE_DESTINATION_FIELD_NUMBER: builtins.int BIGQUERY_DESTINATION_FIELD_NUMBER: builtins.int @@ -235,6 +256,7 @@ class LoggingConfig(google.protobuf.message.Message): SNOWFLAKE_DESTINATION_FIELD_NUMBER: builtins.int CUSTOM_DESTINATION_FIELD_NUMBER: builtins.int ATHENA_DESTINATION_FIELD_NUMBER: builtins.int + COUCHBASE_COLUMNAR_DESTINATION_FIELD_NUMBER: builtins.int sample_rate: builtins.float @property def file_destination(self) -> global___LoggingConfig.FileDestination: ... @@ -248,6 +270,8 @@ class LoggingConfig(google.protobuf.message.Message): def custom_destination(self) -> global___LoggingConfig.CustomDestination: ... @property def athena_destination(self) -> global___LoggingConfig.AthenaDestination: ... 
+ @property + def couchbase_columnar_destination(self) -> global___LoggingConfig.CouchbaseColumnarDestination: ... def __init__( self, *, @@ -258,9 +282,25 @@ class LoggingConfig(google.protobuf.message.Message): snowflake_destination: global___LoggingConfig.SnowflakeDestination | None = ..., custom_destination: global___LoggingConfig.CustomDestination | None = ..., athena_destination: global___LoggingConfig.AthenaDestination | None = ..., + couchbase_columnar_destination: global___LoggingConfig.CouchbaseColumnarDestination | None = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["athena_destination", b"athena_destination", "bigquery_destination", b"bigquery_destination", "custom_destination", b"custom_destination", "destination", b"destination", "file_destination", b"file_destination", "redshift_destination", b"redshift_destination", "snowflake_destination", b"snowflake_destination"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["athena_destination", b"athena_destination", "bigquery_destination", b"bigquery_destination", "custom_destination", b"custom_destination", "destination", b"destination", "file_destination", b"file_destination", "redshift_destination", b"redshift_destination", "sample_rate", b"sample_rate", "snowflake_destination", b"snowflake_destination"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["destination", b"destination"]) -> typing_extensions.Literal["file_destination", "bigquery_destination", "redshift_destination", "snowflake_destination", "custom_destination", "athena_destination"] | None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["athena_destination", b"athena_destination", "bigquery_destination", b"bigquery_destination", "couchbase_columnar_destination", b"couchbase_columnar_destination", "custom_destination", b"custom_destination", "destination", b"destination", "file_destination", b"file_destination", "redshift_destination", b"redshift_destination", "snowflake_destination", b"snowflake_destination"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["athena_destination", b"athena_destination", "bigquery_destination", b"bigquery_destination", "couchbase_columnar_destination", b"couchbase_columnar_destination", "custom_destination", b"custom_destination", "destination", b"destination", "file_destination", b"file_destination", "redshift_destination", b"redshift_destination", "sample_rate", b"sample_rate", "snowflake_destination", b"snowflake_destination"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["destination", b"destination"]) -> typing_extensions.Literal["file_destination", "bigquery_destination", "redshift_destination", "snowflake_destination", "custom_destination", "athena_destination", "couchbase_columnar_destination"] | None: ... global___LoggingConfig = LoggingConfig + +class FeatureServiceList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + FEATURESERVICES_FIELD_NUMBER: builtins.int + @property + def featureservices(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___FeatureService]: ... + def __init__( + self, + *, + featureservices: collections.abc.Iterable[global___FeatureService] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["featureservices", b"featureservices"]) -> None: ... 
+ +global___FeatureServiceList = FeatureServiceList diff --git a/sdk/python/feast/protos/feast/core/FeatureView_pb2.py b/sdk/python/feast/protos/feast/core/FeatureView_pb2.py index f1480593d9a..80d04c1ec3f 100644 --- a/sdk/python/feast/protos/feast/core/FeatureView_pb2.py +++ b/sdk/python/feast/protos/feast/core/FeatureView_pb2.py @@ -18,7 +18,7 @@ from feast.protos.feast.core import Feature_pb2 as feast_dot_core_dot_Feature__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66\x65\x61st/core/FeatureView.proto\x12\nfeast.core\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\"c\n\x0b\x46\x65\x61tureView\x12)\n\x04spec\x18\x01 \x01(\x0b\x32\x1b.feast.core.FeatureViewSpec\x12)\n\x04meta\x18\x02 \x01(\x0b\x32\x1b.feast.core.FeatureViewMeta\"\xbd\x03\n\x0f\x46\x65\x61tureViewSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x10\n\x08\x65ntities\x18\x03 \x03(\t\x12+\n\x08\x66\x65\x61tures\x18\x04 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x31\n\x0e\x65ntity_columns\x18\x0c \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x13\n\x0b\x64\x65scription\x18\n \x01(\t\x12\x33\n\x04tags\x18\x05 \x03(\x0b\x32%.feast.core.FeatureViewSpec.TagsEntry\x12\r\n\x05owner\x18\x0b \x01(\t\x12&\n\x03ttl\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12,\n\x0c\x62\x61tch_source\x18\x07 \x01(\x0b\x32\x16.feast.core.DataSource\x12-\n\rstream_source\x18\t \x01(\x0b\x32\x16.feast.core.DataSource\x12\x0e\n\x06online\x18\x08 \x01(\x08\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcc\x01\n\x0f\x46\x65\x61tureViewMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x19materialization_intervals\x18\x03 
\x03(\x0b\x32#.feast.core.MaterializationInterval\"w\n\x17MaterializationInterval\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampBU\n\x10\x66\x65\x61st.proto.coreB\x10\x46\x65\x61tureViewProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66\x65\x61st/core/FeatureView.proto\x12\nfeast.core\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\"c\n\x0b\x46\x65\x61tureView\x12)\n\x04spec\x18\x01 \x01(\x0b\x32\x1b.feast.core.FeatureViewSpec\x12)\n\x04meta\x18\x02 \x01(\x0b\x32\x1b.feast.core.FeatureViewMeta\"\xbd\x03\n\x0f\x46\x65\x61tureViewSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x10\n\x08\x65ntities\x18\x03 \x03(\t\x12+\n\x08\x66\x65\x61tures\x18\x04 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x31\n\x0e\x65ntity_columns\x18\x0c \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x13\n\x0b\x64\x65scription\x18\n \x01(\t\x12\x33\n\x04tags\x18\x05 \x03(\x0b\x32%.feast.core.FeatureViewSpec.TagsEntry\x12\r\n\x05owner\x18\x0b \x01(\t\x12&\n\x03ttl\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12,\n\x0c\x62\x61tch_source\x18\x07 \x01(\x0b\x32\x16.feast.core.DataSource\x12-\n\rstream_source\x18\t \x01(\x0b\x32\x16.feast.core.DataSource\x12\x0e\n\x06online\x18\x08 \x01(\x08\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcc\x01\n\x0f\x46\x65\x61tureViewMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x19materialization_intervals\x18\x03 \x03(\x0b\x32#.feast.core.MaterializationInterval\"w\n\x17MaterializationInterval\x12.\n\nstart_time\x18\x01 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"@\n\x0f\x46\x65\x61tureViewList\x12-\n\x0c\x66\x65\x61tureviews\x18\x01 \x03(\x0b\x32\x17.feast.core.FeatureViewBU\n\x10\x66\x65\x61st.proto.coreB\x10\x46\x65\x61tureViewProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -38,4 +38,6 @@ _globals['_FEATUREVIEWMETA']._serialized_end=918 _globals['_MATERIALIZATIONINTERVAL']._serialized_start=920 _globals['_MATERIALIZATIONINTERVAL']._serialized_end=1039 + _globals['_FEATUREVIEWLIST']._serialized_start=1041 + _globals['_FEATUREVIEWLIST']._serialized_end=1105 # @@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/FeatureView_pb2.pyi b/sdk/python/feast/protos/feast/core/FeatureView_pb2.pyi index e1d4e2dfee8..57158fc2c6c 100644 --- a/sdk/python/feast/protos/feast/core/FeatureView_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/FeatureView_pb2.pyi @@ -192,3 +192,18 @@ class MaterializationInterval(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["end_time", b"end_time", "start_time", b"start_time"]) -> None: ... global___MaterializationInterval = MaterializationInterval + +class FeatureViewList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + FEATUREVIEWS_FIELD_NUMBER: builtins.int + @property + def featureviews(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___FeatureView]: ... + def __init__( + self, + *, + featureviews: collections.abc.Iterable[global___FeatureView] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["featureviews", b"featureviews"]) -> None: ... 
+ +global___FeatureViewList = FeatureViewList diff --git a/sdk/python/feast/protos/feast/core/Feature_pb2.py b/sdk/python/feast/protos/feast/core/Feature_pb2.py index dd7c6008ef1..6b1081fe811 100644 --- a/sdk/python/feast/protos/feast/core/Feature_pb2.py +++ b/sdk/python/feast/protos/feast/core/Feature_pb2.py @@ -15,7 +15,7 @@ from feast.protos.feast.types import Value_pb2 as feast_dot_types_dot_Value__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66\x65\x61st/core/Feature.proto\x12\nfeast.core\x1a\x17\x66\x65\x61st/types/Value.proto\"\xc3\x01\n\rFeatureSpecV2\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\nvalue_type\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum\x12\x31\n\x04tags\x18\x03 \x03(\x0b\x32#.feast.core.FeatureSpecV2.TagsEntry\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42Q\n\x10\x66\x65\x61st.proto.coreB\x0c\x46\x65\x61tureProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66\x65\x61st/core/Feature.proto\x12\nfeast.core\x1a\x17\x66\x65\x61st/types/Value.proto\"\xf7\x01\n\rFeatureSpecV2\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\nvalue_type\x18\x02 \x01(\x0e\x32\x1b.feast.types.ValueType.Enum\x12\x31\n\x04tags\x18\x03 \x03(\x0b\x32#.feast.core.FeatureSpecV2.TagsEntry\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x14\n\x0cvector_index\x18\x05 \x01(\x08\x12\x1c\n\x14vector_search_metric\x18\x06 \x01(\t\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42Q\n\x10\x66\x65\x61st.proto.coreB\x0c\x46\x65\x61tureProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -26,7 +26,7 @@ _globals['_FEATURESPECV2_TAGSENTRY']._options = None _globals['_FEATURESPECV2_TAGSENTRY']._serialized_options = b'8\001' 
_globals['_FEATURESPECV2']._serialized_start=66 - _globals['_FEATURESPECV2']._serialized_end=261 - _globals['_FEATURESPECV2_TAGSENTRY']._serialized_start=218 - _globals['_FEATURESPECV2_TAGSENTRY']._serialized_end=261 + _globals['_FEATURESPECV2']._serialized_end=313 + _globals['_FEATURESPECV2_TAGSENTRY']._serialized_start=270 + _globals['_FEATURESPECV2_TAGSENTRY']._serialized_end=313 # @@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/Feature_pb2.pyi b/sdk/python/feast/protos/feast/core/Feature_pb2.pyi index f4235b0965b..451f1aa61ce 100644 --- a/sdk/python/feast/protos/feast/core/Feature_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/Feature_pb2.pyi @@ -53,6 +53,8 @@ class FeatureSpecV2(google.protobuf.message.Message): VALUE_TYPE_FIELD_NUMBER: builtins.int TAGS_FIELD_NUMBER: builtins.int DESCRIPTION_FIELD_NUMBER: builtins.int + VECTOR_INDEX_FIELD_NUMBER: builtins.int + VECTOR_SEARCH_METRIC_FIELD_NUMBER: builtins.int name: builtins.str """Name of the feature. Not updatable.""" value_type: feast.types.Value_pb2.ValueType.Enum.ValueType @@ -62,6 +64,10 @@ class FeatureSpecV2(google.protobuf.message.Message): """Tags for user defined metadata on a feature""" description: builtins.str """Description of the feature.""" + vector_index: builtins.bool + """Field indicating the vector will be indexed for vector similarity search""" + vector_search_metric: builtins.str + """Metric used for vector similarity search.""" def __init__( self, *, @@ -69,7 +75,9 @@ class FeatureSpecV2(google.protobuf.message.Message): value_type: feast.types.Value_pb2.ValueType.Enum.ValueType = ..., tags: collections.abc.Mapping[builtins.str, builtins.str] | None = ..., description: builtins.str = ..., + vector_index: builtins.bool = ..., + vector_search_metric: builtins.str = ..., ) -> None: ... 
- def ClearField(self, field_name: typing_extensions.Literal["description", b"description", "name", b"name", "tags", b"tags", "value_type", b"value_type"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["description", b"description", "name", b"name", "tags", b"tags", "value_type", b"value_type", "vector_index", b"vector_index", "vector_search_metric", b"vector_search_metric"]) -> None: ... global___FeatureSpecV2 = FeatureSpecV2 diff --git a/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.py b/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.py index 020515a6b89..926b54df288 100644 --- a/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.py +++ b/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.py @@ -20,7 +20,7 @@ from feast.protos.feast.core import Transformation_pb2 as feast_dot_core_dot_Transformation__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$feast/core/OnDemandFeatureView.proto\x12\nfeast.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1c\x66\x65\x61st/core/FeatureView.proto\x1a&feast/core/FeatureViewProjection.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x1f\x66\x65\x61st/core/Transformation.proto\"{\n\x13OnDemandFeatureView\x12\x31\n\x04spec\x18\x01 \x01(\x0b\x32#.feast.core.OnDemandFeatureViewSpec\x12\x31\n\x04meta\x18\x02 \x01(\x0b\x32#.feast.core.OnDemandFeatureViewMeta\"\x90\x05\n\x17OnDemandFeatureViewSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12+\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x41\n\x07sources\x18\x04 \x03(\x0b\x32\x30.feast.core.OnDemandFeatureViewSpec.SourcesEntry\x12\x42\n\x15user_defined_function\x18\x05 \x01(\x0b\x32\x1f.feast.core.UserDefinedFunctionB\x02\x18\x01\x12\x43\n\x16\x66\x65\x61ture_transformation\x18\n \x01(\x0b\x32#.feast.core.FeatureTransformationV2\x12\x13\n\x0b\x64\x65scription\x18\x06 
\x01(\t\x12;\n\x04tags\x18\x07 \x03(\x0b\x32-.feast.core.OnDemandFeatureViewSpec.TagsEntry\x12\r\n\x05owner\x18\x08 \x01(\t\x12\x0c\n\x04mode\x18\x0b \x01(\t\x12\x1d\n\x15write_to_online_store\x18\x0c \x01(\x08\x12\x10\n\x08\x65ntities\x18\r \x03(\t\x12\x31\n\x0e\x65ntity_columns\x18\x0e \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x11\n\tsingleton\x18\x0f \x01(\x08\x1aJ\n\x0cSourcesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.feast.core.OnDemandSource:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8c\x01\n\x17OnDemandFeatureViewMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc8\x01\n\x0eOnDemandSource\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x44\n\x17\x66\x65\x61ture_view_projection\x18\x03 \x01(\x0b\x32!.feast.core.FeatureViewProjectionH\x00\x12\x35\n\x13request_data_source\x18\x02 \x01(\x0b\x32\x16.feast.core.DataSourceH\x00\x42\x08\n\x06source\"H\n\x13UserDefinedFunction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\x12\x11\n\tbody_text\x18\x03 \x01(\t:\x02\x18\x01\x42]\n\x10\x66\x65\x61st.proto.coreB\x18OnDemandFeatureViewProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$feast/core/OnDemandFeatureView.proto\x12\nfeast.core\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1c\x66\x65\x61st/core/FeatureView.proto\x1a&feast/core/FeatureViewProjection.proto\x1a\x18\x66\x65\x61st/core/Feature.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x1f\x66\x65\x61st/core/Transformation.proto\"{\n\x13OnDemandFeatureView\x12\x31\n\x04spec\x18\x01 \x01(\x0b\x32#.feast.core.OnDemandFeatureViewSpec\x12\x31\n\x04meta\x18\x02 
\x01(\x0b\x32#.feast.core.OnDemandFeatureViewMeta\"\x90\x05\n\x17OnDemandFeatureViewSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12+\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x41\n\x07sources\x18\x04 \x03(\x0b\x32\x30.feast.core.OnDemandFeatureViewSpec.SourcesEntry\x12\x42\n\x15user_defined_function\x18\x05 \x01(\x0b\x32\x1f.feast.core.UserDefinedFunctionB\x02\x18\x01\x12\x43\n\x16\x66\x65\x61ture_transformation\x18\n \x01(\x0b\x32#.feast.core.FeatureTransformationV2\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12;\n\x04tags\x18\x07 \x03(\x0b\x32-.feast.core.OnDemandFeatureViewSpec.TagsEntry\x12\r\n\x05owner\x18\x08 \x01(\t\x12\x0c\n\x04mode\x18\x0b \x01(\t\x12\x1d\n\x15write_to_online_store\x18\x0c \x01(\x08\x12\x10\n\x08\x65ntities\x18\r \x03(\t\x12\x31\n\x0e\x65ntity_columns\x18\x0e \x03(\x0b\x32\x19.feast.core.FeatureSpecV2\x12\x11\n\tsingleton\x18\x0f \x01(\x08\x1aJ\n\x0cSourcesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.feast.core.OnDemandSource:\x02\x38\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8c\x01\n\x17OnDemandFeatureViewMeta\x12\x35\n\x11\x63reated_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16last_updated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc8\x01\n\x0eOnDemandSource\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x44\n\x17\x66\x65\x61ture_view_projection\x18\x03 \x01(\x0b\x32!.feast.core.FeatureViewProjectionH\x00\x12\x35\n\x13request_data_source\x18\x02 \x01(\x0b\x32\x16.feast.core.DataSourceH\x00\x42\x08\n\x06source\"H\n\x13UserDefinedFunction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\x12\x11\n\tbody_text\x18\x03 \x01(\t:\x02\x18\x01\"X\n\x17OnDemandFeatureViewList\x12=\n\x14ondemandfeatureviews\x18\x01 
\x03(\x0b\x32\x1f.feast.core.OnDemandFeatureViewB]\n\x10\x66\x65\x61st.proto.coreB\x18OnDemandFeatureViewProtoZ/github.com/feast-dev/feast/go/protos/feast/coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -50,4 +50,6 @@ _globals['_ONDEMANDSOURCE']._serialized_end=1371 _globals['_USERDEFINEDFUNCTION']._serialized_start=1373 _globals['_USERDEFINEDFUNCTION']._serialized_end=1445 + _globals['_ONDEMANDFEATUREVIEWLIST']._serialized_start=1447 + _globals['_ONDEMANDFEATUREVIEWLIST']._serialized_end=1535 # @@protoc_insertion_point(module_scope) diff --git a/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.pyi b/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.pyi index 3380779c97e..c9fca2f550d 100644 --- a/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.pyi +++ b/sdk/python/feast/protos/feast/core/OnDemandFeatureView_pb2.pyi @@ -233,3 +233,18 @@ class UserDefinedFunction(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["body", b"body", "body_text", b"body_text", "name", b"name"]) -> None: ... global___UserDefinedFunction = UserDefinedFunction + +class OnDemandFeatureViewList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + ONDEMANDFEATUREVIEWS_FIELD_NUMBER: builtins.int + @property + def ondemandfeatureviews(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___OnDemandFeatureView]: ... + def __init__( + self, + *, + ondemandfeatureviews: collections.abc.Iterable[global___OnDemandFeatureView] | None = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ondemandfeatureviews", b"ondemandfeatureviews"]) -> None: ... 
+ +global___OnDemandFeatureViewList = OnDemandFeatureViewList diff --git a/sdk/python/feast/protos/feast/registry/RegistryServer_pb2.py b/sdk/python/feast/protos/feast/registry/RegistryServer_pb2.py index e0cae3da4b7..2d5f7b020ab 100644 --- a/sdk/python/feast/protos/feast/registry/RegistryServer_pb2.py +++ b/sdk/python/feast/protos/feast/registry/RegistryServer_pb2.py @@ -28,13 +28,14 @@ from feast.protos.feast.core import Project_pb2 as feast_dot_core_dot_Project__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#feast/registry/RegistryServer.proto\x12\x0e\x66\x65\x61st.registry\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19\x66\x65\x61st/core/Registry.proto\x1a\x17\x66\x65\x61st/core/Entity.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x1c\x66\x65\x61st/core/FeatureView.proto\x1a\"feast/core/StreamFeatureView.proto\x1a$feast/core/OnDemandFeatureView.proto\x1a\x1f\x66\x65\x61st/core/FeatureService.proto\x1a\x1d\x66\x65\x61st/core/SavedDataset.proto\x1a\"feast/core/ValidationProfile.proto\x1a\x1c\x66\x65\x61st/core/InfraObject.proto\x1a\x1b\x66\x65\x61st/core/Permission.proto\x1a\x18\x66\x65\x61st/core/Project.proto\"!\n\x0eRefreshRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\"W\n\x12UpdateInfraRequest\x12 \n\x05infra\x18\x01 \x01(\x0b\x32\x11.feast.core.Infra\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"7\n\x0fGetInfraRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"B\n\x1aListProjectMetadataRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"T\n\x1bListProjectMetadataResponse\x12\x35\n\x10project_metadata\x18\x01 \x03(\x0b\x32\x1b.feast.core.ProjectMetadata\"\xcb\x01\n\x1b\x41pplyMaterializationRequest\x12-\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureView\x12\x0f\n\x07project\x18\x02 \x01(\t\x12.\n\nstart_date\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_date\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06\x63ommit\x18\x05 \x01(\x08\"Y\n\x12\x41pplyEntityRequest\x12\"\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x12.feast.core.Entity\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"F\n\x10GetEntityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xa5\x01\n\x13ListEntitiesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12;\n\x04tags\x18\x03 \x03(\x0b\x32-.feast.registry.ListEntitiesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"<\n\x14ListEntitiesResponse\x12$\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x12.feast.core.Entity\"D\n\x13\x44\x65leteEntityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"f\n\x16\x41pplyDataSourceRequest\x12+\n\x0b\x64\x61ta_source\x18\x01 \x01(\x0b\x32\x16.feast.core.DataSource\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"J\n\x14GetDataSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xab\x01\n\x16ListDataSourcesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12>\n\x04tags\x18\x03 \x03(\x0b\x32\x30.feast.registry.ListDataSourcesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"G\n\x17ListDataSourcesResponse\x12,\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x16.feast.core.DataSource\"H\n\x17\x44\x65leteDataSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"\x81\x02\n\x17\x41pplyFeatureViewRequest\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 
\x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x41\n\x16on_demand_feature_view\x18\x02 \x01(\x0b\x32\x1f.feast.core.OnDemandFeatureViewH\x00\x12<\n\x13stream_feature_view\x18\x03 \x01(\x0b\x32\x1d.feast.core.StreamFeatureViewH\x00\x12\x0f\n\x07project\x18\x04 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x05 \x01(\x08\x42\x13\n\x11\x62\x61se_feature_view\"K\n\x15GetFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xad\x01\n\x17ListFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12?\n\x04tags\x18\x03 \x03(\x0b\x32\x31.feast.registry.ListFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x18ListFeatureViewsResponse\x12.\n\rfeature_views\x18\x01 \x03(\x0b\x32\x17.feast.core.FeatureView\"I\n\x18\x44\x65leteFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"\xd6\x01\n\x0e\x41nyFeatureView\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x41\n\x16on_demand_feature_view\x18\x02 \x01(\x0b\x32\x1f.feast.core.OnDemandFeatureViewH\x00\x12<\n\x13stream_feature_view\x18\x03 \x01(\x0b\x32\x1d.feast.core.StreamFeatureViewH\x00\x42\x12\n\x10\x61ny_feature_view\"N\n\x18GetAnyFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"U\n\x19GetAnyFeatureViewResponse\x12\x38\n\x10\x61ny_feature_view\x18\x01 \x01(\x0b\x32\x1e.feast.registry.AnyFeatureView\"\xb3\x01\n\x1aListAllFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x42\n\x04tags\x18\x03 \x03(\x0b\x32\x34.feast.registry.ListAllFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"T\n\x1bListAllFeatureViewsResponse\x12\x35\n\rfeature_views\x18\x01 \x03(\x0b\x32\x1e.feast.registry.AnyFeatureView\"Q\n\x1bGetStreamFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xb9\x01\n\x1dListStreamFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x45\n\x04tags\x18\x03 \x03(\x0b\x32\x37.feast.registry.ListStreamFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"]\n\x1eListStreamFeatureViewsResponse\x12;\n\x14stream_feature_views\x18\x01 \x03(\x0b\x32\x1d.feast.core.StreamFeatureView\"S\n\x1dGetOnDemandFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xbd\x01\n\x1fListOnDemandFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12G\n\x04tags\x18\x03 \x03(\x0b\x32\x39.feast.registry.ListOnDemandFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"d\n ListOnDemandFeatureViewsResponse\x12@\n\x17on_demand_feature_views\x18\x01 \x03(\x0b\x32\x1f.feast.core.OnDemandFeatureView\"r\n\x1a\x41pplyFeatureServiceRequest\x12\x33\n\x0f\x66\x65\x61ture_service\x18\x01 \x01(\x0b\x32\x1a.feast.core.FeatureService\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"N\n\x18GetFeatureServiceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xb3\x01\n\x1aListFeatureServicesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x42\n\x04tags\x18\x03 \x03(\x0b\x32\x34.feast.registry.ListFeatureServicesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x1bListFeatureServicesResponse\x12\x34\n\x10\x66\x65\x61ture_services\x18\x01 \x03(\x0b\x32\x1a.feast.core.FeatureService\"L\n\x1b\x44\x65leteFeatureServiceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"l\n\x18\x41pplySavedDatasetRequest\x12/\n\rsaved_dataset\x18\x01 \x01(\x0b\x32\x18.feast.core.SavedDataset\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"L\n\x16GetSavedDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xaf\x01\n\x18ListSavedDatasetsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12@\n\x04tags\x18\x03 \x03(\x0b\x32\x32.feast.registry.ListSavedDatasetsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"M\n\x19ListSavedDatasetsResponse\x12\x30\n\x0esaved_datasets\x18\x01 \x03(\x0b\x32\x18.feast.core.SavedDataset\"J\n\x19\x44\x65leteSavedDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"\x81\x01\n\x1f\x41pplyValidationReferenceRequest\x12=\n\x14validation_reference\x18\x01 \x01(\x0b\x32\x1f.feast.core.ValidationReference\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"S\n\x1dGetValidationReferenceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xbd\x01\n\x1fListValidationReferencesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12G\n\x04tags\x18\x03 \x03(\x0b\x32\x39.feast.registry.ListValidationReferencesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n 
ListValidationReferencesResponse\x12>\n\x15validation_references\x18\x01 \x03(\x0b\x32\x1f.feast.core.ValidationReference\"Q\n DeleteValidationReferenceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"e\n\x16\x41pplyPermissionRequest\x12*\n\npermission\x18\x01 \x01(\x0b\x32\x16.feast.core.Permission\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"J\n\x14GetPermissionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xab\x01\n\x16ListPermissionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12>\n\x04tags\x18\x03 \x03(\x0b\x32\x30.feast.registry.ListPermissionsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"F\n\x17ListPermissionsResponse\x12+\n\x0bpermissions\x18\x01 \x03(\x0b\x32\x16.feast.core.Permission\"H\n\x17\x44\x65letePermissionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"K\n\x13\x41pplyProjectRequest\x12$\n\x07project\x18\x01 \x01(\x0b\x32\x13.feast.core.Project\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\x08\"6\n\x11GetProjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"\x94\x01\n\x13ListProjectsRequest\x12\x13\n\x0b\x61llow_cache\x18\x01 \x01(\x08\x12;\n\x04tags\x18\x02 \x03(\x0b\x32-.feast.registry.ListProjectsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"=\n\x14ListProjectsResponse\x12%\n\x08projects\x18\x01 \x03(\x0b\x32\x13.feast.core.Project\"4\n\x14\x44\x65leteProjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\x08\x32\xcb 
\n\x0eRegistryServer\x12K\n\x0b\x41pplyEntity\x12\".feast.registry.ApplyEntityRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\tGetEntity\x12 .feast.registry.GetEntityRequest\x1a\x12.feast.core.Entity\"\x00\x12[\n\x0cListEntities\x12#.feast.registry.ListEntitiesRequest\x1a$.feast.registry.ListEntitiesResponse\"\x00\x12M\n\x0c\x44\x65leteEntity\x12#.feast.registry.DeleteEntityRequest\x1a\x16.google.protobuf.Empty\"\x00\x12S\n\x0f\x41pplyDataSource\x12&.feast.registry.ApplyDataSourceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12O\n\rGetDataSource\x12$.feast.registry.GetDataSourceRequest\x1a\x16.feast.core.DataSource\"\x00\x12\x64\n\x0fListDataSources\x12&.feast.registry.ListDataSourcesRequest\x1a\'.feast.registry.ListDataSourcesResponse\"\x00\x12U\n\x10\x44\x65leteDataSource\x12\'.feast.registry.DeleteDataSourceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12U\n\x10\x41pplyFeatureView\x12\'.feast.registry.ApplyFeatureViewRequest\x1a\x16.google.protobuf.Empty\"\x00\x12W\n\x11\x44\x65leteFeatureView\x12(.feast.registry.DeleteFeatureViewRequest\x1a\x16.google.protobuf.Empty\"\x00\x12j\n\x11GetAnyFeatureView\x12(.feast.registry.GetAnyFeatureViewRequest\x1a).feast.registry.GetAnyFeatureViewResponse\"\x00\x12p\n\x13ListAllFeatureViews\x12*.feast.registry.ListAllFeatureViewsRequest\x1a+.feast.registry.ListAllFeatureViewsResponse\"\x00\x12R\n\x0eGetFeatureView\x12%.feast.registry.GetFeatureViewRequest\x1a\x17.feast.core.FeatureView\"\x00\x12g\n\x10ListFeatureViews\x12\'.feast.registry.ListFeatureViewsRequest\x1a(.feast.registry.ListFeatureViewsResponse\"\x00\x12\x64\n\x14GetStreamFeatureView\x12+.feast.registry.GetStreamFeatureViewRequest\x1a\x1d.feast.core.StreamFeatureView\"\x00\x12y\n\x16ListStreamFeatureViews\x12-.feast.registry.ListStreamFeatureViewsRequest\x1a..feast.registry.ListStreamFeatureViewsResponse\"\x00\x12j\n\x16GetOnDemandFeatureView\x12-.feast.registry.GetOnDemandFeatureViewRequest\x1a\x1f.feast.core.OnDemandFeatureView\"\x00\x12\x7f\n\x18ListOnDemandFe
atureViews\x12/.feast.registry.ListOnDemandFeatureViewsRequest\x1a\x30.feast.registry.ListOnDemandFeatureViewsResponse\"\x00\x12[\n\x13\x41pplyFeatureService\x12*.feast.registry.ApplyFeatureServiceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12[\n\x11GetFeatureService\x12(.feast.registry.GetFeatureServiceRequest\x1a\x1a.feast.core.FeatureService\"\x00\x12p\n\x13ListFeatureServices\x12*.feast.registry.ListFeatureServicesRequest\x1a+.feast.registry.ListFeatureServicesResponse\"\x00\x12]\n\x14\x44\x65leteFeatureService\x12+.feast.registry.DeleteFeatureServiceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12W\n\x11\x41pplySavedDataset\x12(.feast.registry.ApplySavedDatasetRequest\x1a\x16.google.protobuf.Empty\"\x00\x12U\n\x0fGetSavedDataset\x12&.feast.registry.GetSavedDatasetRequest\x1a\x18.feast.core.SavedDataset\"\x00\x12j\n\x11ListSavedDatasets\x12(.feast.registry.ListSavedDatasetsRequest\x1a).feast.registry.ListSavedDatasetsResponse\"\x00\x12Y\n\x12\x44\x65leteSavedDataset\x12).feast.registry.DeleteSavedDatasetRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x65\n\x18\x41pplyValidationReference\x12/.feast.registry.ApplyValidationReferenceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12j\n\x16GetValidationReference\x12-.feast.registry.GetValidationReferenceRequest\x1a\x1f.feast.core.ValidationReference\"\x00\x12\x7f\n\x18ListValidationReferences\x12/.feast.registry.ListValidationReferencesRequest\x1a\x30.feast.registry.ListValidationReferencesResponse\"\x00\x12g\n\x19\x44\x65leteValidationReference\x12\x30.feast.registry.DeleteValidationReferenceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12S\n\x0f\x41pplyPermission\x12&.feast.registry.ApplyPermissionRequest\x1a\x16.google.protobuf.Empty\"\x00\x12O\n\rGetPermission\x12$.feast.registry.GetPermissionRequest\x1a\x16.feast.core.Permission\"\x00\x12\x64\n\x0fListPermissions\x12&.feast.registry.ListPermissionsRequest\x1a\'.feast.registry.ListPermissionsResponse\"\x00\x12U\n\x10\x44\x65letePermission\x12\'.feast.registry.Delete
PermissionRequest\x1a\x16.google.protobuf.Empty\"\x00\x12M\n\x0c\x41pplyProject\x12#.feast.registry.ApplyProjectRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x46\n\nGetProject\x12!.feast.registry.GetProjectRequest\x1a\x13.feast.core.Project\"\x00\x12[\n\x0cListProjects\x12#.feast.registry.ListProjectsRequest\x1a$.feast.registry.ListProjectsResponse\"\x00\x12O\n\rDeleteProject\x12$.feast.registry.DeleteProjectRequest\x1a\x16.google.protobuf.Empty\"\x00\x12]\n\x14\x41pplyMaterialization\x12+.feast.registry.ApplyMaterializationRequest\x1a\x16.google.protobuf.Empty\"\x00\x12p\n\x13ListProjectMetadata\x12*.feast.registry.ListProjectMetadataRequest\x1a+.feast.registry.ListProjectMetadataResponse\"\x00\x12K\n\x0bUpdateInfra\x12\".feast.registry.UpdateInfraRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x08GetInfra\x12\x1f.feast.registry.GetInfraRequest\x1a\x11.feast.core.Infra\"\x00\x12:\n\x06\x43ommit\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x07Refresh\x12\x1e.feast.registry.RefreshRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x37\n\x05Proto\x12\x16.google.protobuf.Empty\x1a\x14.feast.core.Registry\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#feast/registry/RegistryServer.proto\x12\x0e\x66\x65\x61st.registry\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19\x66\x65\x61st/core/Registry.proto\x1a\x17\x66\x65\x61st/core/Entity.proto\x1a\x1b\x66\x65\x61st/core/DataSource.proto\x1a\x1c\x66\x65\x61st/core/FeatureView.proto\x1a\"feast/core/StreamFeatureView.proto\x1a$feast/core/OnDemandFeatureView.proto\x1a\x1f\x66\x65\x61st/core/FeatureService.proto\x1a\x1d\x66\x65\x61st/core/SavedDataset.proto\x1a\"feast/core/ValidationProfile.proto\x1a\x1c\x66\x65\x61st/core/InfraObject.proto\x1a\x1b\x66\x65\x61st/core/Permission.proto\x1a\x18\x66\x65\x61st/core/Project.proto\"!\n\x0eRefreshRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\"W\n\x12UpdateInfraRequest\x12 
\n\x05infra\x18\x01 \x01(\x0b\x32\x11.feast.core.Infra\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"7\n\x0fGetInfraRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"B\n\x1aListProjectMetadataRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"T\n\x1bListProjectMetadataResponse\x12\x35\n\x10project_metadata\x18\x01 \x03(\x0b\x32\x1b.feast.core.ProjectMetadata\"\xcb\x01\n\x1b\x41pplyMaterializationRequest\x12-\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureView\x12\x0f\n\x07project\x18\x02 \x01(\t\x12.\n\nstart_date\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_date\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06\x63ommit\x18\x05 \x01(\x08\"Y\n\x12\x41pplyEntityRequest\x12\"\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x12.feast.core.Entity\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"F\n\x10GetEntityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xa5\x01\n\x13ListEntitiesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12;\n\x04tags\x18\x03 \x03(\x0b\x32-.feast.registry.ListEntitiesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"<\n\x14ListEntitiesResponse\x12$\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x12.feast.core.Entity\"D\n\x13\x44\x65leteEntityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"f\n\x16\x41pplyDataSourceRequest\x12+\n\x0b\x64\x61ta_source\x18\x01 \x01(\x0b\x32\x16.feast.core.DataSource\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"J\n\x14GetDataSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 
\x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xab\x01\n\x16ListDataSourcesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12>\n\x04tags\x18\x03 \x03(\x0b\x32\x30.feast.registry.ListDataSourcesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"G\n\x17ListDataSourcesResponse\x12,\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x16.feast.core.DataSource\"H\n\x17\x44\x65leteDataSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"\x81\x02\n\x17\x41pplyFeatureViewRequest\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x41\n\x16on_demand_feature_view\x18\x02 \x01(\x0b\x32\x1f.feast.core.OnDemandFeatureViewH\x00\x12<\n\x13stream_feature_view\x18\x03 \x01(\x0b\x32\x1d.feast.core.StreamFeatureViewH\x00\x12\x0f\n\x07project\x18\x04 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x05 \x01(\x08\x42\x13\n\x11\x62\x61se_feature_view\"K\n\x15GetFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xad\x01\n\x17ListFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12?\n\x04tags\x18\x03 \x03(\x0b\x32\x31.feast.registry.ListFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x18ListFeatureViewsResponse\x12.\n\rfeature_views\x18\x01 \x03(\x0b\x32\x17.feast.core.FeatureView\"I\n\x18\x44\x65leteFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"\xd6\x01\n\x0e\x41nyFeatureView\x12/\n\x0c\x66\x65\x61ture_view\x18\x01 \x01(\x0b\x32\x17.feast.core.FeatureViewH\x00\x12\x41\n\x16on_demand_feature_view\x18\x02 
\x01(\x0b\x32\x1f.feast.core.OnDemandFeatureViewH\x00\x12<\n\x13stream_feature_view\x18\x03 \x01(\x0b\x32\x1d.feast.core.StreamFeatureViewH\x00\x42\x12\n\x10\x61ny_feature_view\"N\n\x18GetAnyFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"U\n\x19GetAnyFeatureViewResponse\x12\x38\n\x10\x61ny_feature_view\x18\x01 \x01(\x0b\x32\x1e.feast.registry.AnyFeatureView\"\xb3\x01\n\x1aListAllFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x42\n\x04tags\x18\x03 \x03(\x0b\x32\x34.feast.registry.ListAllFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"T\n\x1bListAllFeatureViewsResponse\x12\x35\n\rfeature_views\x18\x01 \x03(\x0b\x32\x1e.feast.registry.AnyFeatureView\"Q\n\x1bGetStreamFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xb9\x01\n\x1dListStreamFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x45\n\x04tags\x18\x03 \x03(\x0b\x32\x37.feast.registry.ListStreamFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"]\n\x1eListStreamFeatureViewsResponse\x12;\n\x14stream_feature_views\x18\x01 \x03(\x0b\x32\x1d.feast.core.StreamFeatureView\"S\n\x1dGetOnDemandFeatureViewRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xbd\x01\n\x1fListOnDemandFeatureViewsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12G\n\x04tags\x18\x03 \x03(\x0b\x32\x39.feast.registry.ListOnDemandFeatureViewsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"d\n 
ListOnDemandFeatureViewsResponse\x12@\n\x17on_demand_feature_views\x18\x01 \x03(\x0b\x32\x1f.feast.core.OnDemandFeatureView\"r\n\x1a\x41pplyFeatureServiceRequest\x12\x33\n\x0f\x66\x65\x61ture_service\x18\x01 \x01(\x0b\x32\x1a.feast.core.FeatureService\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"N\n\x18GetFeatureServiceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xb3\x01\n\x1aListFeatureServicesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12\x42\n\x04tags\x18\x03 \x03(\x0b\x32\x34.feast.registry.ListFeatureServicesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x1bListFeatureServicesResponse\x12\x34\n\x10\x66\x65\x61ture_services\x18\x01 \x03(\x0b\x32\x1a.feast.core.FeatureService\"L\n\x1b\x44\x65leteFeatureServiceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"l\n\x18\x41pplySavedDatasetRequest\x12/\n\rsaved_dataset\x18\x01 \x01(\x0b\x32\x18.feast.core.SavedDataset\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"L\n\x16GetSavedDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xaf\x01\n\x18ListSavedDatasetsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12@\n\x04tags\x18\x03 \x03(\x0b\x32\x32.feast.registry.ListSavedDatasetsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"M\n\x19ListSavedDatasetsResponse\x12\x30\n\x0esaved_datasets\x18\x01 \x03(\x0b\x32\x18.feast.core.SavedDataset\"J\n\x19\x44\x65leteSavedDatasetRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 
\x01(\x08\"\x81\x01\n\x1f\x41pplyValidationReferenceRequest\x12=\n\x14validation_reference\x18\x01 \x01(\x0b\x32\x1f.feast.core.ValidationReference\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"S\n\x1dGetValidationReferenceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xbd\x01\n\x1fListValidationReferencesRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12G\n\x04tags\x18\x03 \x03(\x0b\x32\x39.feast.registry.ListValidationReferencesRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"b\n ListValidationReferencesResponse\x12>\n\x15validation_references\x18\x01 \x03(\x0b\x32\x1f.feast.core.ValidationReference\"Q\n DeleteValidationReferenceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"e\n\x16\x41pplyPermissionRequest\x12*\n\npermission\x18\x01 \x01(\x0b\x32\x16.feast.core.Permission\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"J\n\x14GetPermissionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x03 \x01(\x08\"\xab\x01\n\x16ListPermissionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\x12>\n\x04tags\x18\x03 \x03(\x0b\x32\x30.feast.registry.ListPermissionsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"F\n\x17ListPermissionsResponse\x12+\n\x0bpermissions\x18\x01 \x03(\x0b\x32\x16.feast.core.Permission\"H\n\x17\x44\x65letePermissionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07project\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\x08\"K\n\x13\x41pplyProjectRequest\x12$\n\x07project\x18\x01 \x01(\x0b\x32\x13.feast.core.Project\x12\x0e\n\x06\x63ommit\x18\x02 
\x01(\x08\"6\n\x11GetProjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x61llow_cache\x18\x02 \x01(\x08\"\x94\x01\n\x13ListProjectsRequest\x12\x13\n\x0b\x61llow_cache\x18\x01 \x01(\x08\x12;\n\x04tags\x18\x02 \x03(\x0b\x32-.feast.registry.ListProjectsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"=\n\x14ListProjectsResponse\x12%\n\x08projects\x18\x01 \x03(\x0b\x32\x13.feast.core.Project\"4\n\x14\x44\x65leteProjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x02 \x01(\x08\x32\xcb \n\x0eRegistryServer\x12K\n\x0b\x41pplyEntity\x12\".feast.registry.ApplyEntityRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\tGetEntity\x12 .feast.registry.GetEntityRequest\x1a\x12.feast.core.Entity\"\x00\x12[\n\x0cListEntities\x12#.feast.registry.ListEntitiesRequest\x1a$.feast.registry.ListEntitiesResponse\"\x00\x12M\n\x0c\x44\x65leteEntity\x12#.feast.registry.DeleteEntityRequest\x1a\x16.google.protobuf.Empty\"\x00\x12S\n\x0f\x41pplyDataSource\x12&.feast.registry.ApplyDataSourceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12O\n\rGetDataSource\x12$.feast.registry.GetDataSourceRequest\x1a\x16.feast.core.DataSource\"\x00\x12\x64\n\x0fListDataSources\x12&.feast.registry.ListDataSourcesRequest\x1a\'.feast.registry.ListDataSourcesResponse\"\x00\x12U\n\x10\x44\x65leteDataSource\x12\'.feast.registry.DeleteDataSourceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12U\n\x10\x41pplyFeatureView\x12\'.feast.registry.ApplyFeatureViewRequest\x1a\x16.google.protobuf.Empty\"\x00\x12W\n\x11\x44\x65leteFeatureView\x12(.feast.registry.DeleteFeatureViewRequest\x1a\x16.google.protobuf.Empty\"\x00\x12j\n\x11GetAnyFeatureView\x12(.feast.registry.GetAnyFeatureViewRequest\x1a).feast.registry.GetAnyFeatureViewResponse\"\x00\x12p\n\x13ListAllFeatureViews\x12*.feast.registry.ListAllFeatureViewsRequest\x1a+.feast.registry.ListAllFeatureViewsResponse\"\x00\x12R\n\x0eGetFeatureView\x12%.feast.registry.GetFeatureView
Request\x1a\x17.feast.core.FeatureView\"\x00\x12g\n\x10ListFeatureViews\x12\'.feast.registry.ListFeatureViewsRequest\x1a(.feast.registry.ListFeatureViewsResponse\"\x00\x12\x64\n\x14GetStreamFeatureView\x12+.feast.registry.GetStreamFeatureViewRequest\x1a\x1d.feast.core.StreamFeatureView\"\x00\x12y\n\x16ListStreamFeatureViews\x12-.feast.registry.ListStreamFeatureViewsRequest\x1a..feast.registry.ListStreamFeatureViewsResponse\"\x00\x12j\n\x16GetOnDemandFeatureView\x12-.feast.registry.GetOnDemandFeatureViewRequest\x1a\x1f.feast.core.OnDemandFeatureView\"\x00\x12\x7f\n\x18ListOnDemandFeatureViews\x12/.feast.registry.ListOnDemandFeatureViewsRequest\x1a\x30.feast.registry.ListOnDemandFeatureViewsResponse\"\x00\x12[\n\x13\x41pplyFeatureService\x12*.feast.registry.ApplyFeatureServiceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12[\n\x11GetFeatureService\x12(.feast.registry.GetFeatureServiceRequest\x1a\x1a.feast.core.FeatureService\"\x00\x12p\n\x13ListFeatureServices\x12*.feast.registry.ListFeatureServicesRequest\x1a+.feast.registry.ListFeatureServicesResponse\"\x00\x12]\n\x14\x44\x65leteFeatureService\x12+.feast.registry.DeleteFeatureServiceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12W\n\x11\x41pplySavedDataset\x12(.feast.registry.ApplySavedDatasetRequest\x1a\x16.google.protobuf.Empty\"\x00\x12U\n\x0fGetSavedDataset\x12&.feast.registry.GetSavedDatasetRequest\x1a\x18.feast.core.SavedDataset\"\x00\x12j\n\x11ListSavedDatasets\x12(.feast.registry.ListSavedDatasetsRequest\x1a).feast.registry.ListSavedDatasetsResponse\"\x00\x12Y\n\x12\x44\x65leteSavedDataset\x12).feast.registry.DeleteSavedDatasetRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x65\n\x18\x41pplyValidationReference\x12/.feast.registry.ApplyValidationReferenceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12j\n\x16GetValidationReference\x12-.feast.registry.GetValidationReferenceRequest\x1a\x1f.feast.core.ValidationReference\"\x00\x12\x7f\n\x18ListValidationReferences\x12/.feast.registry.ListValidationReferencesReques
t\x1a\x30.feast.registry.ListValidationReferencesResponse\"\x00\x12g\n\x19\x44\x65leteValidationReference\x12\x30.feast.registry.DeleteValidationReferenceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12S\n\x0f\x41pplyPermission\x12&.feast.registry.ApplyPermissionRequest\x1a\x16.google.protobuf.Empty\"\x00\x12O\n\rGetPermission\x12$.feast.registry.GetPermissionRequest\x1a\x16.feast.core.Permission\"\x00\x12\x64\n\x0fListPermissions\x12&.feast.registry.ListPermissionsRequest\x1a\'.feast.registry.ListPermissionsResponse\"\x00\x12U\n\x10\x44\x65letePermission\x12\'.feast.registry.DeletePermissionRequest\x1a\x16.google.protobuf.Empty\"\x00\x12M\n\x0c\x41pplyProject\x12#.feast.registry.ApplyProjectRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x46\n\nGetProject\x12!.feast.registry.GetProjectRequest\x1a\x13.feast.core.Project\"\x00\x12[\n\x0cListProjects\x12#.feast.registry.ListProjectsRequest\x1a$.feast.registry.ListProjectsResponse\"\x00\x12O\n\rDeleteProject\x12$.feast.registry.DeleteProjectRequest\x1a\x16.google.protobuf.Empty\"\x00\x12]\n\x14\x41pplyMaterialization\x12+.feast.registry.ApplyMaterializationRequest\x1a\x16.google.protobuf.Empty\"\x00\x12p\n\x13ListProjectMetadata\x12*.feast.registry.ListProjectMetadataRequest\x1a+.feast.registry.ListProjectMetadataResponse\"\x00\x12K\n\x0bUpdateInfra\x12\".feast.registry.UpdateInfraRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x08GetInfra\x12\x1f.feast.registry.GetInfraRequest\x1a\x11.feast.core.Infra\"\x00\x12:\n\x06\x43ommit\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x07Refresh\x12\x1e.feast.registry.RefreshRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x37\n\x05Proto\x12\x16.google.protobuf.Empty\x1a\x14.feast.core.Registry\"\x00\x42\x35Z3github.com/feast-dev/feast/go/protos/feast/registryb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'feast.registry.RegistryServer_pb2', 
_globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None + _globals['DESCRIPTOR']._options = None + _globals['DESCRIPTOR']._serialized_options = b'Z3github.com/feast-dev/feast/go/protos/feast/registry' _globals['_LISTENTITIESREQUEST_TAGSENTRY']._options = None _globals['_LISTENTITIESREQUEST_TAGSENTRY']._serialized_options = b'8\001' _globals['_LISTDATASOURCESREQUEST_TAGSENTRY']._options = None diff --git a/sdk/python/feast/protos/feast/serving/GrpcServer_pb2.py b/sdk/python/feast/protos/feast/serving/GrpcServer_pb2.py index 8e40630cfff..ce4db37a658 100644 --- a/sdk/python/feast/protos/feast/serving/GrpcServer_pb2.py +++ b/sdk/python/feast/protos/feast/serving/GrpcServer_pb2.py @@ -15,13 +15,14 @@ from feast.protos.feast.serving import ServingService_pb2 as feast_dot_serving_dot_ServingService__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66\x65\x61st/serving/GrpcServer.proto\x1a\"feast/serving/ServingService.proto\"\xb3\x01\n\x0bPushRequest\x12,\n\x08\x66\x65\x61tures\x18\x01 \x03(\x0b\x32\x1a.PushRequest.FeaturesEntry\x12\x1b\n\x13stream_feature_view\x18\x02 \x01(\t\x12\x1c\n\x14\x61llow_registry_cache\x18\x03 \x01(\x08\x12\n\n\x02to\x18\x04 \x01(\t\x1a/\n\rFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1e\n\x0cPushResponse\x12\x0e\n\x06status\x18\x01 \x01(\x08\"\xc1\x01\n\x19WriteToOnlineStoreRequest\x12:\n\x08\x66\x65\x61tures\x18\x01 \x03(\x0b\x32(.WriteToOnlineStoreRequest.FeaturesEntry\x12\x19\n\x11\x66\x65\x61ture_view_name\x18\x02 \x01(\t\x12\x1c\n\x14\x61llow_registry_cache\x18\x03 \x01(\x08\x1a/\n\rFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\",\n\x1aWriteToOnlineStoreResponse\x12\x0e\n\x06status\x18\x01 
\x01(\x08\x32\xf1\x01\n\x11GrpcFeatureServer\x12%\n\x04Push\x12\x0c.PushRequest\x1a\r.PushResponse\"\x00\x12M\n\x12WriteToOnlineStore\x12\x1a.WriteToOnlineStoreRequest\x1a\x1b.WriteToOnlineStoreResponse\x12\x66\n\x11GetOnlineFeatures\x12\'.feast.serving.GetOnlineFeaturesRequest\x1a(.feast.serving.GetOnlineFeaturesResponseb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66\x65\x61st/serving/GrpcServer.proto\x1a\"feast/serving/ServingService.proto\"\xb3\x01\n\x0bPushRequest\x12,\n\x08\x66\x65\x61tures\x18\x01 \x03(\x0b\x32\x1a.PushRequest.FeaturesEntry\x12\x1b\n\x13stream_feature_view\x18\x02 \x01(\t\x12\x1c\n\x14\x61llow_registry_cache\x18\x03 \x01(\x08\x12\n\n\x02to\x18\x04 \x01(\t\x1a/\n\rFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1e\n\x0cPushResponse\x12\x0e\n\x06status\x18\x01 \x01(\x08\"\xc1\x01\n\x19WriteToOnlineStoreRequest\x12:\n\x08\x66\x65\x61tures\x18\x01 \x03(\x0b\x32(.WriteToOnlineStoreRequest.FeaturesEntry\x12\x19\n\x11\x66\x65\x61ture_view_name\x18\x02 \x01(\t\x12\x1c\n\x14\x61llow_registry_cache\x18\x03 \x01(\x08\x1a/\n\rFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\",\n\x1aWriteToOnlineStoreResponse\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32\xf1\x01\n\x11GrpcFeatureServer\x12%\n\x04Push\x12\x0c.PushRequest\x1a\r.PushResponse\"\x00\x12M\n\x12WriteToOnlineStore\x12\x1a.WriteToOnlineStoreRequest\x1a\x1b.WriteToOnlineStoreResponse\x12\x66\n\x11GetOnlineFeatures\x12\'.feast.serving.GetOnlineFeaturesRequest\x1a(.feast.serving.GetOnlineFeaturesResponseB4Z2github.com/feast-dev/feast/go/protos/feast/servingb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'feast.serving.GrpcServer_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None + _globals['DESCRIPTOR']._options = None + 
_globals['DESCRIPTOR']._serialized_options = b'Z2github.com/feast-dev/feast/go/protos/feast/serving' _globals['_PUSHREQUEST_FEATURESENTRY']._options = None _globals['_PUSHREQUEST_FEATURESENTRY']._serialized_options = b'8\001' _globals['_WRITETOONLINESTOREREQUEST_FEATURESENTRY']._options = None diff --git a/sdk/python/feast/registry_server.py b/sdk/python/feast/registry_server.py index 181dc79656e..c9abf62ccd7 100644 --- a/sdk/python/feast/registry_server.py +++ b/sdk/python/feast/registry_server.py @@ -792,9 +792,10 @@ def start_server( reflection.enable_server_reflection(service_names_available_for_reflection, server) if tls_cert_path and tls_key_path: - with open(tls_cert_path, "rb") as cert_file, open( - tls_key_path, "rb" - ) as key_file: + with ( + open(tls_cert_path, "rb") as cert_file, + open(tls_key_path, "rb") as key_file, + ): certificate_chain = cert_file.read() private_key = key_file.read() server_credentials = grpc.ssl_server_credentials( diff --git a/sdk/python/feast/repo_config.py b/sdk/python/feast/repo_config.py index 185a8723ada..66b3f201594 100644 --- a/sdk/python/feast/repo_config.py +++ b/sdk/python/feast/repo_config.py @@ -70,7 +70,7 @@ "dynamodb": "feast.infra.online_stores.dynamodb.DynamoDBOnlineStore", "snowflake.online": "feast.infra.online_stores.snowflake.SnowflakeOnlineStore", "bigtable": "feast.infra.online_stores.bigtable.BigtableOnlineStore", - "postgres": "feast.infra.online_stores.postgres_online_store.PostgreSQLOnlineStore", + "postgres": "feast.infra.online_stores.postgres_online_store.postgres.PostgreSQLOnlineStore", "hbase": "feast.infra.online_stores.hbase_online_store.hbase.HbaseOnlineStore", "cassandra": "feast.infra.online_stores.cassandra_online_store.cassandra_online_store.CassandraOnlineStore", "mysql": "feast.infra.online_stores.mysql_online_store.mysql.MySQLOnlineStore", @@ -80,7 +80,8 @@ "remote": "feast.infra.online_stores.remote.RemoteOnlineStore", "singlestore": 
"feast.infra.online_stores.singlestore_online_store.singlestore.SingleStoreOnlineStore", "qdrant": "feast.infra.online_stores.cqdrant.QdrantOnlineStore", - "couchbase": "feast.infra.online_stores.couchbase_online_store.couchbase.CouchbaseOnlineStore", + "couchbase.online": "feast.infra.online_stores.couchbase_online_store.couchbase.CouchbaseOnlineStore", + "milvus": "feast.infra.online_stores.milvus_online_store.milvus.MilvusOnlineStore", **LEGACY_ONLINE_STORE_CLASS_FOR_TYPE, } @@ -97,6 +98,7 @@ "mssql": "feast.infra.offline_stores.contrib.mssql_offline_store.mssql.MsSqlServerOfflineStore", "duckdb": "feast.infra.offline_stores.duckdb.DuckDBOfflineStore", "remote": "feast.infra.offline_stores.remote.RemoteOfflineStore", + "couchbase.offline": "feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase.CouchbaseColumnarOfflineStore", } FEATURE_SERVER_CONFIG_CLASS_FOR_TYPE = { @@ -368,14 +370,14 @@ def _validate_auth_config(cls, values: Any) -> Any: ) elif values["auth"]["type"] not in ALLOWED_AUTH_TYPES: raise ValueError( - f'auth configuration has invalid authentication type={values["auth"]["type"]}. Possible ' - f'values={ALLOWED_AUTH_TYPES}' + f"auth configuration has invalid authentication type={values['auth']['type']}. Possible " + f"values={ALLOWED_AUTH_TYPES}" ) elif isinstance(values["auth"], AuthConfig): if values["auth"].type not in ALLOWED_AUTH_TYPES: raise ValueError( - f'auth configuration has invalid authentication type={values["auth"].type}. Possible ' - f'values={ALLOWED_AUTH_TYPES}' + f"auth configuration has invalid authentication type={values['auth'].type}. 
Possible " + f"values={ALLOWED_AUTH_TYPES}" ) return values diff --git a/sdk/python/feast/repo_operations.py b/sdk/python/feast/repo_operations.py index 4db0bbc6fdb..a3bf52fb10e 100644 --- a/sdk/python/feast/repo_operations.py +++ b/sdk/python/feast/repo_operations.py @@ -43,6 +43,7 @@ def py_path_to_module(path: Path) -> str: str(path.relative_to(os.getcwd()))[: -len(".py")] .replace("./", "") .replace("/", ".") + .replace("\\", ".") ) diff --git a/sdk/python/feast/ssl_ca_trust_store_setup.py b/sdk/python/feast/ssl_ca_trust_store_setup.py new file mode 100644 index 00000000000..72e84132187 --- /dev/null +++ b/sdk/python/feast/ssl_ca_trust_store_setup.py @@ -0,0 +1,22 @@ +import logging +import os + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +def configure_ca_trust_store_env_variables(): + """ + configures the environment variable so that other libraries or servers refer to the TLS ca file path. + :param ca_file_path: + :return: + """ + if ( + "FEAST_CA_CERT_FILE_PATH" in os.environ + and os.environ["FEAST_CA_CERT_FILE_PATH"] + ): + logger.info( + f"Feast CA Cert file path found in environment variable FEAST_CA_CERT_FILE_PATH={os.environ['FEAST_CA_CERT_FILE_PATH']}. Going to refer this path." + ) + os.environ["SSL_CERT_FILE"] = os.environ["FEAST_CA_CERT_FILE_PATH"] + os.environ["REQUESTS_CA_BUNDLE"] = os.environ["FEAST_CA_CERT_FILE_PATH"] diff --git a/sdk/python/feast/static/chat/index.html b/sdk/python/feast/static/chat/index.html new file mode 100644 index 00000000000..302c3b55b6a --- /dev/null +++ b/sdk/python/feast/static/chat/index.html @@ -0,0 +1,129 @@ + + + + + + Feast Chat + + + +
+
Hello! How can I help you today?
+
+
+ + +
+ + + diff --git a/sdk/python/feast/stream_feature_view.py b/sdk/python/feast/stream_feature_view.py index 50e1a221456..42802993226 100644 --- a/sdk/python/feast/stream_feature_view.py +++ b/sdk/python/feast/stream_feature_view.py @@ -31,7 +31,8 @@ from feast.protos.feast.core.Transformation_pb2 import ( UserDefinedFunctionV2 as UserDefinedFunctionProtoV2, ) -from feast.transformation.pandas_transformation import PandasTransformation +from feast.transformation.base import Transformation +from feast.transformation.mode import TransformationMode warnings.simplefilter("once", RuntimeWarning) @@ -75,12 +76,12 @@ class StreamFeatureView(FeatureView): tags: Dict[str, str] owner: str aggregations: List[Aggregation] - mode: str + mode: Union[TransformationMode, str] timestamp_field: str materialization_intervals: List[Tuple[datetime, datetime]] udf: Optional[FunctionType] udf_string: Optional[str] - feature_transformation: Optional[PandasTransformation] + feature_transformation: Optional[Transformation] def __init__( self, @@ -95,11 +96,11 @@ def __init__( owner: str = "", schema: Optional[List[Field]] = None, aggregations: Optional[List[Aggregation]] = None, - mode: Optional[str] = "spark", + mode: Union[str, TransformationMode] = TransformationMode.PYTHON, timestamp_field: Optional[str] = "", udf: Optional[FunctionType] = None, udf_string: Optional[str] = "", - feature_transformation: Optional[Union[PandasTransformation]] = None, + feature_transformation: Optional[Transformation] = None, ): if not flags_helper.is_test(): warnings.warn( @@ -123,11 +124,13 @@ def __init__( ) self.aggregations = aggregations or [] - self.mode = mode or "" + self.mode = mode self.timestamp_field = timestamp_field or "" self.udf = udf self.udf_string = udf_string - self.feature_transformation = feature_transformation + self.feature_transformation = ( + feature_transformation or self.get_feature_transformation() + ) super().__init__( name=name, @@ -141,6 +144,23 @@ def __init__( source=source, 
) + def get_feature_transformation(self) -> Optional[Transformation]: + if not self.udf: + # TODO: Currently StreamFeatureView allow no transformation, but this should be removed in the future + return None + if self.mode in ( + TransformationMode.PANDAS, + TransformationMode.PYTHON, + TransformationMode.SPARK, + ) or self.mode in ("pandas", "python", "spark"): + return Transformation( + mode=self.mode, udf=self.udf, udf_string=self.udf_string or "" + ) + else: + raise ValueError( + f"Unsupported transformation mode: {self.mode} for StreamFeatureView" + ) + def __eq__(self, other): if not isinstance(other, StreamFeatureView): raise TypeError("Comparisons should only involve StreamFeatureViews") @@ -198,6 +218,10 @@ def to_proto(self): user_defined_function=udf_proto_v2, ) + mode = ( + self.mode.value if isinstance(self.mode, TransformationMode) else self.mode + ) + spec = StreamFeatureViewSpecProto( name=self.name, entities=self.entities, @@ -214,7 +238,7 @@ def to_proto(self): stream_source=stream_source_proto or None, timestamp_field=self.timestamp_field, aggregations=[agg.to_proto() for agg in self.aggregations], - mode=self.mode, + mode=mode, ) return StreamFeatureViewProto(spec=spec, meta=meta) @@ -264,9 +288,6 @@ def from_proto(cls, sfv_proto): mode=sfv_proto.spec.mode, udf=udf, udf_string=udf_string, - feature_transformation=PandasTransformation(udf, udf_string) - if udf - else None, aggregations=[ Aggregation.from_proto(agg_proto) for agg_proto in sfv_proto.spec.aggregations @@ -323,6 +344,7 @@ def __copy__(self): timestamp_field=self.timestamp_field, source=self.stream_source if self.stream_source else self.batch_source, udf=self.udf, + udf_string=self.udf_string, feature_transformation=self.feature_transformation, ) fv.entities = self.entities @@ -373,7 +395,6 @@ def decorator(user_function): schema=schema, udf=user_function, udf_string=udf_string, - feature_transformation=PandasTransformation(user_function, udf_string), description=description, 
tags=tags, online=online, diff --git a/sdk/python/feast/templates/cassandra/bootstrap.py b/sdk/python/feast/templates/cassandra/bootstrap.py index 33385141145..16c82316258 100644 --- a/sdk/python/feast/templates/cassandra/bootstrap.py +++ b/sdk/python/feast/templates/cassandra/bootstrap.py @@ -57,7 +57,7 @@ def collect_cassandra_store_settings(): # it's regular Cassandra c_secure_bundle_path = None hosts_string = click.prompt( - ("Enter the seed hosts of your cluster " "(comma-separated IP addresses)"), + ("Enter the seed hosts of your cluster (comma-separated IP addresses)"), default="127.0.0.1", ) c_hosts = [ diff --git a/sdk/python/feast/templates/couchbase/__init__.py b/sdk/python/feast/templates/couchbase/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/templates/couchbase/bootstrap.py b/sdk/python/feast/templates/couchbase/bootstrap.py new file mode 100644 index 00000000000..1034e14e0de --- /dev/null +++ b/sdk/python/feast/templates/couchbase/bootstrap.py @@ -0,0 +1,108 @@ +import click +from couchbase_columnar.cluster import Cluster +from couchbase_columnar.common.errors import InvalidCredentialError +from couchbase_columnar.credential import Credential +from couchbase_columnar.options import ClusterOptions, QueryOptions, TimeoutOptions + +from feast.file_utils import replace_str_in_file +from feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase import ( + CouchbaseColumnarOfflineStoreConfig, + df_to_columnar, +) + + +def bootstrap(): + # Bootstrap() will automatically be called from the init_repo() during `feast init` + + import pathlib + from datetime import datetime, timedelta + + from feast.driver_test_data import create_driver_hourly_stats_df + + repo_path = pathlib.Path(__file__).parent.absolute() / "feature_repo" + config_file = repo_path / "feature_store.yaml" + + if click.confirm("Configure Couchbase Online Store?", default=True): + connection_string = click.prompt( + "Couchbase Connection 
String", default="couchbase://127.0.0.1" + ) + user = click.prompt("Couchbase Username", default="Administrator") + password = click.prompt("Couchbase Password", hide_input=True) + bucket_name = click.prompt("Couchbase Bucket Name", default="feast") + kv_port = click.prompt("Couchbase KV Port", default=11210) + + replace_str_in_file( + config_file, "COUCHBASE_CONNECTION_STRING", connection_string + ) + replace_str_in_file(config_file, "COUCHBASE_USER", user) + replace_str_in_file(config_file, "COUCHBASE_PASSWORD", password) + replace_str_in_file(config_file, "COUCHBASE_BUCKET_NAME", bucket_name) + replace_str_in_file(config_file, "COUCHBASE_KV_PORT", str(kv_port)) + + if click.confirm( + "Configure Couchbase Columnar Offline Store? (Note: requires Couchbase Capella Columnar)", + default=True, + ): + end_date = datetime.now().replace(microsecond=0, second=0, minute=0) + start_date = end_date - timedelta(days=15) + + driver_entities = [1001, 1002, 1003, 1004, 1005] + driver_df = create_driver_hourly_stats_df(driver_entities, start_date, end_date) + + columnar_connection_string = click.prompt("Columnar Connection String") + columnar_user = click.prompt("Columnar Username") + columnar_password = click.prompt("Columnar Password", hide_input=True) + columnar_timeout = click.prompt("Couchbase Columnar Timeout", default=120) + + if click.confirm( + 'Should I upload example data to Couchbase Capella Columnar (overwriting "Default.Default.feast_driver_hourly_stats" table)?', + default=True, + ): + cred = Credential.from_username_and_password( + columnar_user, columnar_password + ) + timeout_opts = TimeoutOptions(dispatch_timeout=timedelta(seconds=120)) + cluster = Cluster.create_instance( + columnar_connection_string, + cred, + ClusterOptions(timeout_options=timeout_opts), + ) + + table_name = "Default.Default.feast_driver_hourly_stats" + try: + cluster.execute_query( + f"DROP COLLECTION {table_name} IF EXISTS", + QueryOptions(timeout=timedelta(seconds=columnar_timeout)), + 
) + except InvalidCredentialError: + print("Error: Invalid Cluster Credentials.") + return + + offline_store = CouchbaseColumnarOfflineStoreConfig( + type="couchbase.offline", + connection_string=columnar_connection_string, + user=columnar_user, + password=columnar_password, + timeout=columnar_timeout, + ) + + df_to_columnar( + df=driver_df, table_name=table_name, offline_store=offline_store + ) + + replace_str_in_file( + config_file, + "COUCHBASE_COLUMNAR_CONNECTION_STRING", + columnar_connection_string, + ) + replace_str_in_file(config_file, "COUCHBASE_COLUMNAR_USER", columnar_user) + replace_str_in_file( + config_file, "COUCHBASE_COLUMNAR_PASSWORD", columnar_password + ) + replace_str_in_file( + config_file, "COUCHBASE_COLUMNAR_TIMEOUT", str(columnar_timeout) + ) + + +if __name__ == "__main__": + bootstrap() diff --git a/sdk/python/feast/templates/couchbase/feature_repo/__init__.py b/sdk/python/feast/templates/couchbase/feature_repo/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/templates/couchbase/feature_repo/example_repo.py b/sdk/python/feast/templates/couchbase/feature_repo/example_repo.py new file mode 100644 index 00000000000..363ba3c4664 --- /dev/null +++ b/sdk/python/feast/templates/couchbase/feature_repo/example_repo.py @@ -0,0 +1,134 @@ +# This is an example feature definition file + +from datetime import timedelta + +import pandas as pd + +from feast import Entity, FeatureService, FeatureView, Field, PushSource, RequestSource +from feast.infra.offline_stores.contrib.couchbase_offline_store.couchbase_source import ( + CouchbaseColumnarSource, +) +from feast.on_demand_feature_view import on_demand_feature_view +from feast.types import Float32, Float64, Int64 + +# Define an entity for the driver. You can think of an entity as a primary key used to +# fetch features. 
+driver = Entity(name="driver", join_keys=["driver_id"]) + +driver_stats_source = CouchbaseColumnarSource( + name="driver_hourly_stats_source", + query="SELECT * FROM Default.Default.`feast_driver_hourly_stats`", + database="Default", + scope="Default", + collection="feast_driver_hourly_stats", + timestamp_field="event_timestamp", + created_timestamp_column="created", +) + +# Our parquet files contain sample data that includes a driver_id column, timestamps and +# three feature column. Here we define a Feature View that will allow us to serve this +# data to our model online. +driver_stats_fv = FeatureView( + # The unique name of this feature view. Two feature views in a single + # project cannot have the same name + name="driver_hourly_stats", + entities=[driver], + ttl=timedelta(days=1), + # The list of features defined below act as a schema to both define features + # for both materialization of features into a store, and are used as references + # during retrieval for building a training dataset or serving features + schema=[ + Field(name="conv_rate", dtype=Float32), + Field(name="acc_rate", dtype=Float32), + Field(name="avg_daily_trips", dtype=Int64), + ], + online=True, + source=driver_stats_source, + # Tags are user defined key/value pairs that are attached to each + # feature view + tags={"team": "driver_performance"}, +) + +# Define a request data source which encodes features / information only +# available at request time (e.g. 
part of the user initiated HTTP request) +input_request = RequestSource( + name="vals_to_add", + schema=[ + Field(name="val_to_add", dtype=Int64), + Field(name="val_to_add_2", dtype=Int64), + ], +) + + +# Define an on demand feature view which can generate new features based on +# existing feature views and RequestSource features +@on_demand_feature_view( + sources=[driver_stats_fv, input_request], + schema=[ + Field(name="conv_rate_plus_val1", dtype=Float64), + Field(name="conv_rate_plus_val2", dtype=Float64), + ], +) +def transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["conv_rate_plus_val1"] = inputs["conv_rate"] + inputs["val_to_add"] + df["conv_rate_plus_val2"] = inputs["conv_rate"] + inputs["val_to_add_2"] + return df + + +# This groups features into a model version +driver_activity_v1 = FeatureService( + name="driver_activity_v1", + features=[ + driver_stats_fv[["conv_rate"]], # Sub-selects a feature from a feature view + transformed_conv_rate, # Selects all features from the feature view + ], +) +driver_activity_v2 = FeatureService( + name="driver_activity_v2", features=[driver_stats_fv, transformed_conv_rate] +) + +# Defines a way to push data (to be available offline, online or both) into Feast. +driver_stats_push_source = PushSource( + name="driver_stats_push_source", + batch_source=driver_stats_source, +) + +# Defines a slightly modified version of the feature view from above, where the source +# has been changed to the push source. This allows fresh features to be directly pushed +# to the online store for this feature view. 
+driver_stats_fresh_fv = FeatureView( + name="driver_hourly_stats_fresh", + entities=[driver], + ttl=timedelta(days=1), + schema=[ + Field(name="conv_rate", dtype=Float32), + Field(name="acc_rate", dtype=Float32), + Field(name="avg_daily_trips", dtype=Int64), + ], + online=True, + source=driver_stats_push_source, # Changed from above + tags={"team": "driver_performance"}, +) + + +# Define an on demand feature view which can generate new features based on +# existing feature views and RequestSource features +@on_demand_feature_view( + sources=[driver_stats_fresh_fv, input_request], # relies on fresh version of FV + schema=[ + Field(name="conv_rate_plus_val1", dtype=Float64), + Field(name="conv_rate_plus_val2", dtype=Float64), + ], +) +def transformed_conv_rate_fresh(inputs: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["conv_rate_plus_val1"] = inputs["conv_rate"] + inputs["val_to_add"] + df["conv_rate_plus_val2"] = inputs["conv_rate"] + inputs["val_to_add_2"] + return df + + +driver_activity_v3 = FeatureService( + name="driver_activity_v3", + features=[driver_stats_fresh_fv, transformed_conv_rate_fresh], +) diff --git a/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml b/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml new file mode 100644 index 00000000000..96f45934eb5 --- /dev/null +++ b/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml @@ -0,0 +1,17 @@ +project: my_project +registry: data/registry.db +provider: local +online_store: + type: couchbase.online + connection_string: COUCHBASE_CONNECTION_STRING # Couchbase connection string, copied from 'Connect' page in Couchbase Capella console + user: COUCHBASE_USER # Couchbase username from database access credentials + password: COUCHBASE_PASSWORD # Couchbase password from database access credentials + bucket_name: COUCHBASE_BUCKET_NAME # Couchbase bucket name, defaults to feast + kv_port: COUCHBASE_KV_PORT # Couchbase key-value port, defaults to 
11210. Required if custom ports are used. +offline_store: + type: couchbase.offline + connection_string: COUCHBASE_COLUMNAR_CONNECTION_STRING # Copied from Settings > Connection String page in Capella Columnar console, starts with couchbases:// + user: COUCHBASE_COLUMNAR_USER # Couchbase cluster access name from Settings > Access Control page in Capella Columnar console + password: COUCHBASE_COLUMNAR_PASSWORD # Couchbase password from Settings > Access Control page in Capella Columnar console + timeout: COUCHBASE_COLUMNAR_TIMEOUT # Timeout in seconds for Columnar operations, optional +entity_key_serialization_version: 2 diff --git a/sdk/python/feast/templates/couchbase/feature_repo/test_workflow.py b/sdk/python/feast/templates/couchbase/feature_repo/test_workflow.py new file mode 100644 index 00000000000..192d575181c --- /dev/null +++ b/sdk/python/feast/templates/couchbase/feature_repo/test_workflow.py @@ -0,0 +1,112 @@ +import os.path +import subprocess +from datetime import datetime + +import pandas as pd + +from feast import FeatureStore + + +def run_demo(): + store = FeatureStore(repo_path=os.path.dirname(__file__)) + print("\n--- Run feast apply to setup feature store on Couchbase ---") + subprocess.run(["feast", "--chdir", os.path.dirname(__file__), "apply"]) + + print("\n--- Historical features for training ---") + fetch_historical_features_entity_df(store, for_batch_scoring=False) + + print("\n--- Historical features for batch scoring ---") + fetch_historical_features_entity_df(store, for_batch_scoring=True) + + print("\n--- Load features into online store ---") + store.materialize_incremental(end_date=datetime.now()) + + print("\n--- Online features ---") + fetch_online_features(store) + + print("\n--- Online features retrieved (instead) through a feature service---") + fetch_online_features(store, source="feature_service") + + print( + "\n--- Online features retrieved (using feature service v3, which uses a feature view with a push source---" + ) + 
fetch_online_features(store, source="push") + + print("\n--- Online features again with updated values from a stream push---") + fetch_online_features(store, source="push") + + print("\n--- Run feast teardown ---") + subprocess.run(["feast", "--chdir", os.path.dirname(__file__), "teardown"]) + + +def fetch_historical_features_entity_df(store: FeatureStore, for_batch_scoring: bool): + # Note: see https://docs.feast.dev/getting-started/concepts/feature-retrieval for more details on how to retrieve + # for all entities in the offline store instead + entity_df = pd.DataFrame.from_dict( + { + # entity's join key -> entity values + "driver_id": [1001, 1002, 1003], + # "event_timestamp" (reserved key) -> timestamps + "event_timestamp": [ + datetime(2021, 4, 12, 10, 59, 42), + datetime(2021, 4, 12, 8, 12, 10), + datetime(2021, 4, 12, 16, 40, 26), + ], + # (optional) label name -> label values. Feast does not process these + "label_driver_reported_satisfaction": [1, 5, 3], + # values we're using for an on-demand transformation + "val_to_add": [1, 2, 3], + "val_to_add_2": [10, 20, 30], + } + ) + # For batch scoring, we want the latest timestamps + if for_batch_scoring: + entity_df["event_timestamp"] = pd.to_datetime("now", utc=True) + + training_df = store.get_historical_features( + entity_df=entity_df, + features=[ + "driver_hourly_stats:conv_rate", + "driver_hourly_stats:acc_rate", + "driver_hourly_stats:avg_daily_trips", + "transformed_conv_rate:conv_rate_plus_val1", + "transformed_conv_rate:conv_rate_plus_val2", + ], + ).to_df() + print(training_df.head()) + + +def fetch_online_features(store, source: str = ""): + entity_rows = [ + # {join_key: entity_value} + { + "driver_id": 1001, + "val_to_add": 1000, + "val_to_add_2": 2000, + }, + { + "driver_id": 1002, + "val_to_add": 1001, + "val_to_add_2": 2002, + }, + ] + if source == "feature_service": + features_to_fetch = store.get_feature_service("driver_activity_v1") + elif source == "push": + features_to_fetch = 
store.get_feature_service("driver_activity_v3") + else: + features_to_fetch = [ + "driver_hourly_stats:acc_rate", + "transformed_conv_rate:conv_rate_plus_val1", + "transformed_conv_rate:conv_rate_plus_val2", + ] + returned_features = store.get_online_features( + features=features_to_fetch, + entity_rows=entity_rows, + ).to_dict() + for key, value in sorted(returned_features.items()): + print(key, " : ", value) + + +if __name__ == "__main__": + run_demo() diff --git a/sdk/python/feast/templates/couchbase/gitignore b/sdk/python/feast/templates/couchbase/gitignore new file mode 100644 index 00000000000..e86277f60f4 --- /dev/null +++ b/sdk/python/feast/templates/couchbase/gitignore @@ -0,0 +1,45 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*.pyo +*.pyd + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +*.egg-info/ +dist/ +build/ +.venv + +# Pytest +.cache +*.cover +*.log +.coverage +nosetests.xml +coverage.xml +*.hypothesis/ +*.pytest_cache/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IDEs and Editors +.vscode/ +.idea/ +*.swp +*.swo +*.sublime-workspace +*.sublime-project + +# OS generated files +.DS_Store +Thumbs.db diff --git a/sdk/python/feast/transformation/base.py b/sdk/python/feast/transformation/base.py new file mode 100644 index 00000000000..74a1575d638 --- /dev/null +++ b/sdk/python/feast/transformation/base.py @@ -0,0 +1,119 @@ +import functools +from abc import ABC +from typing import Any, Callable, Dict, Optional, Union + +import dill + +from feast.protos.feast.core.Transformation_pb2 import ( + SubstraitTransformationV2 as SubstraitTransformationProto, +) +from feast.protos.feast.core.Transformation_pb2 import ( + UserDefinedFunctionV2 as UserDefinedFunctionProto, +) +from feast.transformation.factory import ( + TRANSFORMATION_CLASS_FOR_TYPE, + get_transformation_class_from_type, +) +from feast.transformation.mode import TransformationMode + + +class Transformation(ABC): + 
udf: Callable[[Any], Any] + udf_string: str + + def __new__( + cls, + mode: Union[TransformationMode, str], + udf: Callable[[Any], Any], + udf_string: str, + name: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + description: str = "", + owner: str = "", + *args, + **kwargs, + ) -> "Transformation": + if cls is Transformation: + if isinstance(mode, TransformationMode): + mode = mode.value + + if mode.lower() in TRANSFORMATION_CLASS_FOR_TYPE: + subclass = get_transformation_class_from_type(mode.lower()) + return super().__new__(subclass) + + raise ValueError( + f"Invalid mode: {mode}. Choose one from TransformationMode." + ) + + return super().__new__(cls) + + def __init__( + self, + mode: Union[TransformationMode, str], + udf: Callable[[Any], Any], + udf_string: str, + name: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + description: str = "", + owner: str = "", + ): + self.mode = mode if isinstance(mode, str) else mode.value + self.udf = udf + self.udf_string = udf_string + self.name = name + self.tags = tags or {} + self.description = description + self.owner = owner + + def to_proto(self) -> Union[UserDefinedFunctionProto, SubstraitTransformationProto]: + return UserDefinedFunctionProto( + name=self.udf.__name__, + body=dill.dumps(self.udf, recurse=True), + body_text=self.udf_string, + ) + + def transform(self, inputs: Any) -> Any: + raise NotImplementedError + + def transform_arrow(self, *args, **kwargs) -> Any: + pass + + def transform_singleton(self, *args, **kwargs) -> Any: + pass + + def infer_features(self, *args, **kwargs) -> Any: + raise NotImplementedError + + def __deepcopy__(self, memo: Optional[Dict[int, Any]] = None) -> "Transformation": + return Transformation(mode=self.mode, udf=self.udf, udf_string=self.udf_string) + + +def transformation( + mode: Union[TransformationMode, str], + name: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + description: Optional[str] = "", + owner: Optional[str] = "", 
+): + def mainify(obj): + # Needed to allow dill to properly serialize the udf. Otherwise, clients will need to have a file with the same + # name as the original file defining the sfv. + if obj.__module__ != "__main__": + obj.__module__ = "__main__" + + def decorator(user_function): + udf_string = dill.source.getsource(user_function) + mainify(user_function) + transformation_obj = Transformation( + mode=mode, + name=name or user_function.__name__, + tags=tags, + description=description, + owner=owner, + udf=user_function, + udf_string=udf_string, + ) + functools.update_wrapper(wrapper=transformation_obj, wrapped=user_function) + return transformation_obj + + return decorator diff --git a/sdk/python/feast/transformation/factory.py b/sdk/python/feast/transformation/factory.py new file mode 100644 index 00000000000..5097d71353a --- /dev/null +++ b/sdk/python/feast/transformation/factory.py @@ -0,0 +1,22 @@ +from feast.importer import import_class + +TRANSFORMATION_CLASS_FOR_TYPE = { + "python": "feast.transformation.python_transformation.PythonTransformation", + "pandas": "feast.transformation.pandas_transformation.PandasTransformation", + "substrait": "feast.transformation.substrait_transformation.SubstraitTransformation", + "sql": "feast.transformation.sql_transformation.SQLTransformation", + "spark": "feast.transformation.spark_transformation.SparkTransformation", +} + + +def get_transformation_class_from_type(transformation_type: str): + if transformation_type in TRANSFORMATION_CLASS_FOR_TYPE: + transformation_type = TRANSFORMATION_CLASS_FOR_TYPE[transformation_type] + elif not transformation_type.endswith("Transformation"): + raise ValueError( + f"Invalid transformation type: {transformation_type}. Choose from {list(TRANSFORMATION_CLASS_FOR_TYPE.keys())}." 
+ ) + module_name, transformation_class_type = transformation_type.rsplit(".", 1) + return import_class( + module_name, transformation_class_type, transformation_class_type + ) diff --git a/sdk/python/feast/transformation/mode.py b/sdk/python/feast/transformation/mode.py new file mode 100644 index 00000000000..4bd5ddbe7a3 --- /dev/null +++ b/sdk/python/feast/transformation/mode.py @@ -0,0 +1,9 @@ +from enum import Enum + + +class TransformationMode(Enum): + PYTHON = "python" + PANDAS = "pandas" + SPARK = "spark" + SQL = "sql" + SUBSTRAIT = "substrait" diff --git a/sdk/python/feast/transformation/pandas_transformation.py b/sdk/python/feast/transformation/pandas_transformation.py index 35e786aac8f..469ddaa7768 100644 --- a/sdk/python/feast/transformation/pandas_transformation.py +++ b/sdk/python/feast/transformation/pandas_transformation.py @@ -1,4 +1,5 @@ -from typing import Any, Callable +import inspect +from typing import Any, Callable, Optional, cast, get_type_hints import dill import pandas as pd @@ -8,23 +9,61 @@ from feast.protos.feast.core.Transformation_pb2 import ( UserDefinedFunctionV2 as UserDefinedFunctionProto, ) +from feast.transformation.base import Transformation +from feast.transformation.mode import TransformationMode from feast.type_map import ( python_type_to_feast_value_type, ) -class PandasTransformation: - def __init__(self, udf: Callable[[Any], Any], udf_string: str = ""): - """ - Creates an PandasTransformation object. 
+class PandasTransformation(Transformation): + def __new__( + cls, + udf: Callable[[Any], Any], + udf_string: str, + name: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + description: str = "", + owner: str = "", + ) -> "PandasTransformation": + instance = super(PandasTransformation, cls).__new__( + cls, + mode=TransformationMode.PANDAS, + udf=udf, + name=name, + udf_string=udf_string, + tags=tags, + description=description, + owner=owner, + ) + return cast(PandasTransformation, instance) - Args: - udf: The user defined transformation function, which must take pandas - dataframes as inputs. - udf_string: The source code version of the udf (for diffing and displaying in Web UI) - """ - self.udf = udf - self.udf_string = udf_string + def __init__( + self, + udf: Callable[[Any], Any], + udf_string: str, + name: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + description: str = "", + owner: str = "", + *args, + **kwargs, + ): + return_annotation = get_type_hints(udf).get("return", inspect._empty) + if return_annotation not in (inspect._empty, pd.DataFrame): + raise TypeError( + f"return signature for PandasTransformation should be pd.DataFrame, instead got {return_annotation}" + ) + + super().__init__( + mode=TransformationMode.PANDAS, + udf=udf, + name=name, + udf_string=udf_string, + tags=tags, + description=description, + owner=owner, + ) def transform_arrow( self, pa_table: pyarrow.Table, features: list[Field] @@ -32,15 +71,15 @@ def transform_arrow( output_df_pandas = self.udf(pa_table.to_pandas()) return pyarrow.Table.from_pandas(output_df_pandas) - def transform(self, input_df: pd.DataFrame) -> pd.DataFrame: - return self.udf(input_df) + def transform(self, inputs: pd.DataFrame) -> pd.DataFrame: + return self.udf(inputs) - def transform_singleton(self, input_df: pd.DataFrame) -> pd.DataFrame: - raise ValueError( - "PandasTransformation does not support singleton transformations." 
- ) - - def infer_features(self, random_input: dict[str, list[Any]]) -> list[Field]: + def infer_features( + self, + random_input: dict[str, list[Any]], + *args, + **kwargs, + ) -> list[Field]: df = pd.DataFrame.from_dict(random_input) output_df: pd.DataFrame = self.transform(df) @@ -80,13 +119,6 @@ def __eq__(self, other): return True - def to_proto(self) -> UserDefinedFunctionProto: - return UserDefinedFunctionProto( - name=self.udf.__name__, - body=dill.dumps(self.udf, recurse=True), - body_text=self.udf_string, - ) - @classmethod def from_proto(cls, user_defined_function_proto: UserDefinedFunctionProto): return PandasTransformation( diff --git a/sdk/python/feast/transformation/python_transformation.py b/sdk/python/feast/transformation/python_transformation.py index ce2aaf2002d..0c2014e6d66 100644 --- a/sdk/python/feast/transformation/python_transformation.py +++ b/sdk/python/feast/transformation/python_transformation.py @@ -1,5 +1,5 @@ from types import FunctionType -from typing import Any +from typing import Any, Dict, Optional, cast import dill import pyarrow @@ -8,22 +8,73 @@ from feast.protos.feast.core.Transformation_pb2 import ( UserDefinedFunctionV2 as UserDefinedFunctionProto, ) +from feast.transformation.base import Transformation +from feast.transformation.mode import TransformationMode from feast.type_map import ( python_type_to_feast_value_type, ) -class PythonTransformation: - def __init__(self, udf: FunctionType, udf_string: str = ""): +class PythonTransformation(Transformation): + udf: FunctionType + + def __new__( + cls, + udf: FunctionType, + udf_string: str, + singleton: bool = False, + name: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + description: str = "", + owner: str = "", + ) -> "PythonTransformation": + instance = super(PythonTransformation, cls).__new__( + cls, + mode=TransformationMode.PYTHON, + singleton=singleton, + udf=udf, + udf_string=udf_string, + name=name, + tags=tags, + description=description, + 
owner=owner, + ) + return cast(PythonTransformation, instance) + + def __init__( + self, + udf: FunctionType, + udf_string: str, + singleton: bool = False, + name: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + description: str = "", + owner: str = "", + *args, + **kwargs, + ): """ - Creates an PythonTransformation object. + Creates a PythonTransformation object. + Args: - udf: The user defined transformation function, which must take pandas + udf: The user-defined transformation function, which must take pandas dataframes as inputs. - udf_string: The source code version of the udf (for diffing and displaying in Web UI) + name: The name of the transformation. + udf_string: The source code version of the UDF (for diffing and displaying in Web UI). + tags: Metadata tags for the transformation. + description: A description of the transformation. + owner: The owner of the transformation. """ - self.udf = udf - self.udf_string = udf_string + super().__init__( + mode=TransformationMode.PYTHON, + udf=udf, + name=name, + udf_string=udf_string, + tags=tags, + description=description, + owner=owner, + ) + self.singleton = singleton def transform_arrow( self, @@ -42,10 +93,12 @@ def transform_singleton(self, input_dict: dict) -> dict: # in the case of a singleton element, it takes the value directly # in the case of a list of lists, it takes the first list input_dict = {k: v[0] for k, v in input_dict.items()} - output_dict = self.udf.__call__(input_dict) + output_dict = self.udf(input_dict) return {**input_dict, **output_dict} - def infer_features(self, random_input: dict[str, Any]) -> list[Field]: + def infer_features( + self, random_input: dict[str, Any], singleton: Optional[bool] = False + ) -> list[Field]: output_dict: dict[str, Any] = self.transform(random_input) fields = [] @@ -58,6 +111,10 @@ def infer_features(self, random_input: dict[str, Any]) -> list[Field]: ) inferred_type = type(feature_value[0]) inferred_value = feature_value[0] + if singleton: 
+ inferred_value = feature_value + inferred_type = None # type: ignore + else: inferred_type = type(feature_value) inferred_value = feature_value @@ -69,7 +126,7 @@ def infer_features(self, random_input: dict[str, Any]) -> list[Field]: python_type_to_feast_value_type( feature_name, value=inferred_value, - type_name=inferred_type.__name__, + type_name=inferred_type.__name__ if inferred_type else None, ) ), ) @@ -90,13 +147,6 @@ def __eq__(self, other): return True - def to_proto(self) -> UserDefinedFunctionProto: - return UserDefinedFunctionProto( - name=self.udf.__name__, - body=dill.dumps(self.udf, recurse=True), - body_text=self.udf_string, - ) - @classmethod def from_proto(cls, user_defined_function_proto: UserDefinedFunctionProto): return PythonTransformation( diff --git a/sdk/python/feast/transformation/spark_transformation.py b/sdk/python/feast/transformation/spark_transformation.py new file mode 100644 index 00000000000..d288cf58b08 --- /dev/null +++ b/sdk/python/feast/transformation/spark_transformation.py @@ -0,0 +1,11 @@ +from typing import Any + +from feast.transformation.base import Transformation + + +class SparkTransformation(Transformation): + def transform(self, inputs: Any) -> Any: + pass + + def infer_features(self, *args, **kwargs) -> Any: + pass diff --git a/sdk/python/feast/transformation/sql_transformation.py b/sdk/python/feast/transformation/sql_transformation.py new file mode 100644 index 00000000000..62d6b40de0b --- /dev/null +++ b/sdk/python/feast/transformation/sql_transformation.py @@ -0,0 +1,8 @@ +from typing import Any + +from feast.transformation.base import Transformation + + +class SQLTransformation(Transformation): + def transform(self, inputs: Any) -> str: + return self.udf(inputs) diff --git a/sdk/python/feast/transformation/substrait_transformation.py b/sdk/python/feast/transformation/substrait_transformation.py index 47e2ced9768..476159cd003 100644 --- a/sdk/python/feast/transformation/substrait_transformation.py +++ 
b/sdk/python/feast/transformation/substrait_transformation.py @@ -1,5 +1,5 @@ -from types import FunctionType -from typing import Any +import inspect +from typing import Any, Callable, Dict, Optional, cast, get_type_hints import dill import pandas as pd @@ -11,23 +11,64 @@ from feast.protos.feast.core.Transformation_pb2 import ( SubstraitTransformationV2 as SubstraitTransformationProto, ) +from feast.transformation.base import Transformation +from feast.transformation.mode import TransformationMode from feast.type_map import ( feast_value_type_to_pandas_type, python_type_to_feast_value_type, ) -class SubstraitTransformation: - def __init__(self, substrait_plan: bytes, ibis_function: FunctionType): +class SubstraitTransformation(Transformation): + def __new__( + cls, + substrait_plan: bytes, + udf: Callable[[Any], Any], + name: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + description: str = "", + owner: str = "", + ) -> "SubstraitTransformation": + instance = super(SubstraitTransformation, cls).__new__( + cls, + mode=TransformationMode.SUBSTRAIT, + udf=udf, + name=name, + udf_string="", + tags=tags, + description=description, + owner=owner, + ) + return cast(SubstraitTransformation, instance) + + def __init__( + self, + substrait_plan: bytes, + udf: Callable[[Any], Any], + name: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + description: str = "", + owner: str = "", + *args, + **kwargs, + ): """ Creates an SubstraitTransformation object. Args: substrait_plan: The user-provided substrait plan. - ibis_function: The user-provided ibis function. + udf: The user-provided ibis function. 
""" + super().__init__( + mode=TransformationMode.SUBSTRAIT, + udf=udf, + name=name, + udf_string="", + tags=tags, + description=description, + owner=owner, + ) self.substrait_plan = substrait_plan - self.ibis_function = ibis_function def transform(self, df: pd.DataFrame) -> pd.DataFrame: def table_provider(names, schema: pyarrow.Schema): @@ -44,7 +85,7 @@ def transform_singleton(self, input_df: pd.DataFrame) -> pd.DataFrame: ) def transform_ibis(self, table): - return self.ibis_function(table) + return self.udf(table) def transform_arrow( self, pa_table: pyarrow.Table, features: list[Field] = [] @@ -61,7 +102,9 @@ def table_provider(names, schema: pyarrow.Schema): return table - def infer_features(self, random_input: dict[str, list[Any]]) -> list[Field]: + def infer_features( + self, random_input: dict[str, list[Any]], singleton: Optional[bool] + ) -> list[Field]: df = pd.DataFrame.from_dict(random_input) output_df: pd.DataFrame = self.transform(df) @@ -96,14 +139,18 @@ def __eq__(self, other): return ( self.substrait_plan == other.substrait_plan - and self.ibis_function.__code__.co_code - == other.ibis_function.__code__.co_code + and self.udf.__code__.co_code == other.udf.__code__.co_code ) + def __deepcopy__( + self, memo: Optional[Dict[int, Any]] = None + ) -> "SubstraitTransformation": + return SubstraitTransformation(substrait_plan=self.substrait_plan, udf=self.udf) + def to_proto(self) -> SubstraitTransformationProto: return SubstraitTransformationProto( substrait_plan=self.substrait_plan, - ibis_function=dill.dumps(self.ibis_function, recurse=True), + ibis_function=dill.dumps(self.udf, recurse=True), ) @classmethod @@ -113,11 +160,19 @@ def from_proto( ): return SubstraitTransformation( substrait_plan=substrait_transformation_proto.substrait_plan, - ibis_function=dill.loads(substrait_transformation_proto.ibis_function), + udf=dill.loads(substrait_transformation_proto.ibis_function), ) @classmethod def from_ibis(cls, user_function, sources): + from 
ibis.expr.types.relations import Table + + return_annotation = get_type_hints(user_function).get("return", inspect._empty) + if return_annotation not in (inspect._empty, Table): + raise TypeError( + f"User function must return an ibis Table, got {return_annotation} for SubstraitTransformation" + ) + import ibis import ibis.expr.datatypes as dt from ibis_substrait.compiler.core import SubstraitCompiler @@ -143,7 +198,9 @@ def from_ibis(cls, user_function, sources): expr = user_function(ibis.table(input_fields, "t")) + substrait_plan = compiler.compile(expr).SerializeToString() + return SubstraitTransformation( - substrait_plan=compiler.compile(expr).SerializeToString(), - ibis_function=user_function, + substrait_plan=substrait_plan, + udf=user_function, ) diff --git a/sdk/python/feast/type_map.py b/sdk/python/feast/type_map.py index 8a88c24ffc1..edc9f0c66d8 100644 --- a/sdk/python/feast/type_map.py +++ b/sdk/python/feast/type_map.py @@ -164,6 +164,7 @@ def python_type_to_feast_value_type( "datetime64[ns]": ValueType.UNIX_TIMESTAMP, "datetime64[ns, tz]": ValueType.UNIX_TIMESTAMP, # special dtype of pandas "datetime64[ns, utc]": ValueType.UNIX_TIMESTAMP, + "date": ValueType.UNIX_TIMESTAMP, "category": ValueType.STRING, } @@ -211,8 +212,7 @@ def python_type_to_feast_value_type( return ValueType[common_item_value_type.name + "_LIST"] raise ValueError( - f"Value with native type {type_name} " - f"cannot be converted into Feast value type" + f"Value with native type {type_name} cannot be converted into Feast value type" ) @@ -458,13 +458,13 @@ def _python_value_to_proto_value( # Numpy convert 0 to int. However, in the feature view definition, the type of column may be a float. # So, if value is 0, type validation must pass if scalar_types are either int or float. 
allowed_types = {np.int64, int, np.float64, float} - assert ( - type(sample) in allowed_types - ), f"Type `{type(sample)}` not in {allowed_types}" + assert type(sample) in allowed_types, ( + f"Type `{type(sample)}` not in {allowed_types}" + ) else: - assert ( - type(sample) in valid_scalar_types - ), f"Type `{type(sample)}` not in {valid_scalar_types}" + assert type(sample) in valid_scalar_types, ( + f"Type `{type(sample)}` not in {valid_scalar_types}" + ) if feast_value_type == ValueType.BOOL: # ProtoValue does not support conversion of np.bool_ so we need to convert it to support np.bool_. return [ @@ -523,6 +523,28 @@ def python_values_to_proto_values( return proto_values +PROTO_VALUE_TO_VALUE_TYPE_MAP: Dict[str, ValueType] = { + "int32_val": ValueType.INT32, + "int64_val": ValueType.INT64, + "double_val": ValueType.DOUBLE, + "float_val": ValueType.FLOAT, + "string_val": ValueType.STRING, + "bytes_val": ValueType.BYTES, + "bool_val": ValueType.BOOL, + "int32_list_val": ValueType.INT32_LIST, + "int64_list_val": ValueType.INT64_LIST, + "double_list_val": ValueType.DOUBLE_LIST, + "float_list_val": ValueType.FLOAT_LIST, + "string_list_val": ValueType.STRING_LIST, + "bytes_list_val": ValueType.BYTES_LIST, + "bool_list_val": ValueType.BOOL_LIST, +} + +VALUE_TYPE_TO_PROTO_VALUE_MAP: Dict[ValueType, str] = { + v: k for k, v in PROTO_VALUE_TO_VALUE_TYPE_MAP.items() +} + + def _proto_value_to_value_type(proto_value: ProtoValue) -> ValueType: """ Returns Feast ValueType given Feast ValueType string. @@ -534,25 +556,9 @@ def _proto_value_to_value_type(proto_value: ProtoValue) -> ValueType: A variant of ValueType. 
""" proto_str = proto_value.WhichOneof("val") - type_map = { - "int32_val": ValueType.INT32, - "int64_val": ValueType.INT64, - "double_val": ValueType.DOUBLE, - "float_val": ValueType.FLOAT, - "string_val": ValueType.STRING, - "bytes_val": ValueType.BYTES, - "bool_val": ValueType.BOOL, - "int32_list_val": ValueType.INT32_LIST, - "int64_list_val": ValueType.INT64_LIST, - "double_list_val": ValueType.DOUBLE_LIST, - "float_list_val": ValueType.FLOAT_LIST, - "string_list_val": ValueType.STRING_LIST, - "bytes_list_val": ValueType.BYTES_LIST, - "bool_list_val": ValueType.BOOL_LIST, - None: ValueType.NULL, - } - - return type_map[proto_str] + if proto_str is None: + return ValueType.UNKNOWN + return PROTO_VALUE_TO_VALUE_TYPE_MAP[proto_str] def pa_to_feast_value_type(pa_type_as_str: str) -> ValueType: @@ -574,6 +580,12 @@ def pa_to_feast_value_type(pa_type_as_str: str) -> ValueType: "bool": ValueType.BOOL, "null": ValueType.NULL, "list": ValueType.DOUBLE_LIST, + "list": ValueType.INT64_LIST, + "list": ValueType.INT32_LIST, + "list": ValueType.STRING_LIST, + "list": ValueType.BOOL_LIST, + "list": ValueType.BYTES_LIST, + "list": ValueType.FLOAT_LIST, } value_type = type_map[pa_type_as_str] @@ -813,6 +825,7 @@ def spark_to_feast_value_type(spark_type_as_str: str) -> ValueType: "float": ValueType.FLOAT, "boolean": ValueType.BOOL, "timestamp": ValueType.UNIX_TIMESTAMP, + "date": ValueType.UNIX_TIMESTAMP, "array": ValueType.BYTES_LIST, "array": ValueType.STRING_LIST, "array": ValueType.INT32_LIST, @@ -822,6 +835,7 @@ def spark_to_feast_value_type(spark_type_as_str: str) -> ValueType: "array": ValueType.FLOAT_LIST, "array": ValueType.BOOL_LIST, "array": ValueType.UNIX_TIMESTAMP_LIST, + "array": ValueType.UNIX_TIMESTAMP_LIST, } if spark_type_as_str.startswith("decimal"): spark_type_as_str = "decimal" @@ -1069,3 +1083,33 @@ def pa_to_athena_value_type(pa_type: "pyarrow.DataType") -> str: } return type_map[pa_type_as_str] + + +def cb_columnar_type_to_feast_value_type(type_str: str) 
-> ValueType: + """ + Convert a Couchbase Columnar type string to a Feast ValueType + """ + type_map: Dict[str, ValueType] = { + # primitive types + "boolean": ValueType.BOOL, + "string": ValueType.STRING, + "bigint": ValueType.INT64, + "double": ValueType.DOUBLE, + # special types + "null": ValueType.NULL, + "missing": ValueType.UNKNOWN, + # composite types + # todo: support for arrays of primitives + "object": ValueType.UNKNOWN, + "array": ValueType.UNKNOWN, + "multiset": ValueType.UNKNOWN, + "uuid": ValueType.STRING, + } + value = ( + type_map[type_str.lower()] + if type_str.lower() in type_map + else ValueType.UNKNOWN + ) + if value == ValueType.UNKNOWN: + print("unknown type:", type_str) + return value diff --git a/sdk/python/feast/types.py b/sdk/python/feast/types.py index 9fb3207e6d6..4f13fbf2652 100644 --- a/sdk/python/feast/types.py +++ b/sdk/python/feast/types.py @@ -14,7 +14,7 @@ from abc import ABC, abstractmethod from datetime import datetime, timezone from enum import Enum -from typing import Dict, Union +from typing import Dict, List, Union import pyarrow @@ -196,6 +196,17 @@ def __str__(self): UnixTimestamp: pyarrow.timestamp("us", tz=_utc_now().tzname()), } +FEAST_VECTOR_TYPES: List[Union[ValueType, PrimitiveFeastType, ComplexFeastType]] = [ + ValueType.BYTES_LIST, + ValueType.INT32_LIST, + ValueType.INT64_LIST, + ValueType.FLOAT_LIST, + ValueType.BOOL_LIST, +] +for k in VALUE_TYPES_TO_FEAST_TYPES: + if k in FEAST_VECTOR_TYPES: + FEAST_VECTOR_TYPES.append(VALUE_TYPES_TO_FEAST_TYPES[k]) + def from_feast_to_pyarrow_type(feast_type: FeastType) -> pyarrow.DataType: """ @@ -207,9 +218,9 @@ def from_feast_to_pyarrow_type(feast_type: FeastType) -> pyarrow.DataType: Raises: ValueError: The conversion could not be performed. 
""" - assert isinstance( - feast_type, (ComplexFeastType, PrimitiveFeastType) - ), f"Expected FeastType, got {type(feast_type)}" + assert isinstance(feast_type, (ComplexFeastType, PrimitiveFeastType)), ( + f"Expected FeastType, got {type(feast_type)}" + ) if isinstance(feast_type, PrimitiveFeastType): if feast_type in FEAST_TYPES_TO_PYARROW_TYPES: return FEAST_TYPES_TO_PYARROW_TYPES[feast_type] @@ -236,3 +247,26 @@ def from_value_type( return VALUE_TYPES_TO_FEAST_TYPES[value_type] raise ValueError(f"Could not convert value type {value_type} to FeastType.") + + +def from_feast_type( + feast_type: FeastType, +) -> ValueType: + """ + Converts a Feast type to a ValueType enum. + + Args: + feast_type: The Feast type to be converted. + + Returns: + The corresponding ValueType enum. + + Raises: + ValueError: The conversion could not be performed. + """ + if feast_type in VALUE_TYPES_TO_FEAST_TYPES.values(): + return list(VALUE_TYPES_TO_FEAST_TYPES.keys())[ + list(VALUE_TYPES_TO_FEAST_TYPES.values()).index(feast_type) + ] + + raise ValueError(f"Could not convert feast type {feast_type} to ValueType.") diff --git a/sdk/python/feast/ui/package.json b/sdk/python/feast/ui/package.json index 2a6329a166b..de74a03f2af 100644 --- a/sdk/python/feast/ui/package.json +++ b/sdk/python/feast/ui/package.json @@ -4,9 +4,9 @@ "private": true, "dependencies": { "@elastic/datemath": "^5.0.3", - "@elastic/eui": "^55.0.1", + "@elastic/eui": "^72.0.0", "@emotion/react": "^11.9.0", - "@feast-dev/feast-ui": "0.41.0", + "@feast-dev/feast-ui": "0.46.0", "@testing-library/jest-dom": "^5.16.4", "@testing-library/react": "^13.2.0", "@testing-library/user-event": "^13.5.0", diff --git a/sdk/python/feast/ui/yarn.lock b/sdk/python/feast/ui/yarn.lock index 24de47b1232..896c9877f14 100644 --- a/sdk/python/feast/ui/yarn.lock +++ b/sdk/python/feast/ui/yarn.lock @@ -1272,10 +1272,10 @@ dependencies: tslib "^1.9.3" -"@elastic/eui@^55.0.1": - version "55.1.2" - resolved 
"https://registry.yarnpkg.com/@elastic/eui/-/eui-55.1.2.tgz#dd0b42f5b26c5800d6a9cb2d4c2fe1afce9d3f07" - integrity sha512-wwZz5KxMIMFlqEsoCRiQBJDc4CrluS1d0sCOmQ5lhIzKhYc91MdxnqCk2i6YkhL4sSDf2Y9KAEuMXa+uweOWUA== +"@elastic/eui@^72.0.0": + version "72.2.0" + resolved "https://registry.yarnpkg.com/@elastic/eui/-/eui-72.2.0.tgz#0d89ec4c6d8a677ba41d086abd509c5a5ea09180" + integrity sha512-3JHKLWqbU1A6qMVkw0n1VZ5PaL07sd3N44tWsRCn+DEaDv9jq68ilEmY1wdYqKXw8VyFwcPbd8ZYZpdzBD2nPA== dependencies: "@types/chroma-js" "^2.0.0" "@types/lodash" "^4.14.160" @@ -1296,7 +1296,7 @@ react-beautiful-dnd "^13.1.0" react-dropzone "^11.5.3" react-element-to-jsx-string "^14.3.4" - react-focus-on "^3.5.4" + react-focus-on "^3.7.0" react-input-autosize "^3.0.0" react-is "^17.0.2" react-virtualized-auto-sizer "^1.0.6" @@ -1307,7 +1307,7 @@ rehype-stringify "^8.0.0" remark-breaks "^2.0.2" remark-emoji "^2.1.0" - remark-parse "^8.0.3" + remark-parse-no-trim "^8.0.4" remark-rehype "^8.0.0" tabbable "^5.2.1" text-diff "^1.0.1" @@ -1570,25 +1570,26 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@feast-dev/feast-ui@0.41.0": - version "0.41.0" - resolved "https://registry.yarnpkg.com/@feast-dev/feast-ui/-/feast-ui-0.41.0.tgz#67eca6328131ee524ee6a6f286cfc4386f698053" - integrity sha512-BkVb4zfR+j95IX9FBzeXFyCimG5Za1a3jyLqjmETRO3hpp5OJanpc2N35AaOn8ZPqka00Be/b8NZ8TjbsRWyVg== +"@feast-dev/feast-ui@0.46.0": + version "0.46.0" + resolved "https://registry.yarnpkg.com/@feast-dev/feast-ui/-/feast-ui-0.46.0.tgz#2ab5fa42b43c20829a6cbb44e66df8f4ee2597ae" + integrity sha512-d4EgsfhXH1nlpMGuD8M/D/2Z7OryUQkg4cUWvGadj06bwoUM60+ku0gGUZb2PnbfgdUdMrB5p7VS9di0jFurNA== dependencies: "@elastic/datemath" "^5.0.3" "@elastic/eui" "^95.12.0" "@emotion/css" "^11.13.0" "@emotion/react" "^11.13.3" inter-ui "^3.19.3" + long "^5.2.3" moment "^2.29.1" protobufjs "^7.1.1" query-string "^7.1.1" + react-app-polyfill "^3.0.0" react-code-blocks "^0.1.6" react-query "^3.39.3" - react-router-dom "<6.4.0" - react-scripts "^5.0.1" + 
react-router-dom "^6.28.0" tslib "^2.3.1" - use-query-params "^1.2.3" + use-query-params "^2.2.1" zod "^3.11.6" "@hello-pangea/dnd@^16.6.0": @@ -2055,6 +2056,11 @@ resolved "https://registry.yarnpkg.com/@protobufjs/utf8/-/utf8-1.1.0.tgz#a777360b5b39a1a2e5106f8e858f2fd2d060c570" integrity sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw== +"@remix-run/router@1.21.1": + version "1.21.1" + resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.21.1.tgz#bf15274d3856c395402719fa6b1dc8cc5245aaf7" + integrity sha512-KeBYSwohb8g4/wCcnksvKTYlg69O62sQeLynn2YE+5z7JWEj95if27kclW9QqbrlsQ2DINI8fjbV3zyuKfwjKg== + "@rollup/plugin-babel@^5.2.0": version "5.3.1" resolved "https://registry.yarnpkg.com/@rollup/plugin-babel/-/plugin-babel-5.3.1.tgz#04bc0608f4aa4b2e4b1aebf284344d0f68fda283" @@ -3362,13 +3368,6 @@ argparse@^2.0.1: resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== -aria-hidden@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/aria-hidden/-/aria-hidden-1.1.3.tgz#bb48de18dc84787a3c6eee113709c473c64ec254" - integrity sha512-RhVWFtKH5BiGMycI72q2RAFMLQi8JP9bLuQXgR5a8Znp7P5KOIADSJeyfI8PCVxLEp067B2HbP5JIiI/PXIZeA== - dependencies: - tslib "^1.0.0" - aria-hidden@^1.2.2: version "1.2.4" resolved "https://registry.yarnpkg.com/aria-hidden/-/aria-hidden-1.2.4.tgz#b78e383fdbc04d05762c78b4a25a501e736c4522" @@ -5723,13 +5722,6 @@ flatted@^3.1.0: resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.5.tgz#76c8584f4fc843db64702a6bd04ab7a8bd666da3" integrity sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg== -focus-lock@^0.11.2: - version "0.11.2" - resolved "https://registry.yarnpkg.com/focus-lock/-/focus-lock-0.11.2.tgz#aeef3caf1cea757797ac8afdebaec8fd9ab243ed" - integrity 
sha512-pZ2bO++NWLHhiKkgP1bEXHhR1/OjVcSvlCJ98aNJDFeb7H5OOQaO+SKOZle6041O9rv2tmbrO4JzClAvDUHf0g== - dependencies: - tslib "^2.0.3" - focus-lock@^1.3.5: version "1.3.5" resolved "https://registry.yarnpkg.com/focus-lock/-/focus-lock-1.3.5.tgz#aa644576e5ec47d227b57eb14e1efb2abf33914c" @@ -7506,6 +7498,11 @@ long@^5.0.0: resolved "https://registry.yarnpkg.com/long/-/long-5.2.0.tgz#2696dadf4b4da2ce3f6f6b89186085d94d52fd61" integrity sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w== +long@^5.2.3: + version "5.2.3" + resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" + integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== + loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -7779,15 +7776,10 @@ nano-time@1.0.0: dependencies: big-integer "^1.6.16" -nanoid@^3.3.3: - version "3.3.4" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab" - integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw== - -nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== +nanoid@^3.3.3, nanoid@^3.3.7: + version "3.3.8" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== natural-compare@^1.4.0: version "1.4.0" @@ -9097,32 +9089,7 @@ react-focus-lock@^2.11.3: use-callback-ref "^1.3.2" use-sidecar "^1.1.2" -react-focus-lock@^2.9.0: - version "2.9.1" - 
resolved "https://registry.yarnpkg.com/react-focus-lock/-/react-focus-lock-2.9.1.tgz#094cfc19b4f334122c73bb0bff65d77a0c92dd16" - integrity sha512-pSWOQrUmiKLkffPO6BpMXN7SNKXMsuOakl652IBuALAu1esk+IcpJyM+ALcYzPTTFz1rD0R54aB9A4HuP5t1Wg== - dependencies: - "@babel/runtime" "^7.0.0" - focus-lock "^0.11.2" - prop-types "^15.6.2" - react-clientside-effect "^1.2.6" - use-callback-ref "^1.3.0" - use-sidecar "^1.1.2" - -react-focus-on@^3.5.4: - version "3.6.0" - resolved "https://registry.yarnpkg.com/react-focus-on/-/react-focus-on-3.6.0.tgz#159e13082dad4ea1f07abe11254f0e981d5a7b79" - integrity sha512-onIRjpd9trAUenXNdDcvjc8KJUSklty4X/Gr7hAm/MzM7ekSF2pg9D8KBKL7ipige22IAPxLRRf/EmJji9KD6Q== - dependencies: - aria-hidden "^1.1.3" - react-focus-lock "^2.9.0" - react-remove-scroll "^2.5.2" - react-style-singleton "^2.2.0" - tslib "^2.3.1" - use-callback-ref "^1.3.0" - use-sidecar "^1.1.2" - -react-focus-on@^3.9.1: +react-focus-on@^3.7.0, react-focus-on@^3.9.1: version "3.9.4" resolved "https://registry.yarnpkg.com/react-focus-on/-/react-focus-on-3.9.4.tgz#0b6c13273d86243c330d1aa53af39290f543da7b" integrity sha512-NFKmeH6++wu8e7LJcbwV8TTd4L5w/U5LMXTMOdUcXhCcZ7F5VOvgeTHd4XN1PD7TNmdvldDu/ENROOykUQ4yQg== @@ -9203,14 +9170,6 @@ react-refresh@^0.11.0: resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.11.0.tgz#77198b944733f0f1f1a90e791de4541f9f074046" integrity sha512-F27qZr8uUqwhWZboondsPx8tnC3Ct3SxZA3V5WyEvujRyyNv0VYPhoBg1gZ8/MV5tubQp76Trw8lTv9hzRBa+A== -react-remove-scroll-bar@^2.3.1: - version "2.3.1" - resolved "https://registry.yarnpkg.com/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.1.tgz#9f13b05b249eaa57c8d646c1ebb83006b3581f5f" - integrity sha512-IvGX3mJclEF7+hga8APZczve1UyGMkMG+tjS0o/U1iLgvZRpjFAQEUBJ4JETfvbNlfNnZnoDyWJCICkA15Mghg== - dependencies: - react-style-singleton "^2.2.0" - tslib "^2.0.0" - react-remove-scroll-bar@^2.3.4, react-remove-scroll-bar@^2.3.6: version "2.3.6" resolved 
"https://registry.yarnpkg.com/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz#3e585e9d163be84a010180b18721e851ac81a29c" @@ -9219,17 +9178,6 @@ react-remove-scroll-bar@^2.3.4, react-remove-scroll-bar@^2.3.6: react-style-singleton "^2.2.1" tslib "^2.0.0" -react-remove-scroll@^2.5.2: - version "2.5.3" - resolved "https://registry.yarnpkg.com/react-remove-scroll/-/react-remove-scroll-2.5.3.tgz#a152196e710e8e5811be39dc352fd8a90b05c961" - integrity sha512-NQ1bXrxKrnK5pFo/GhLkXeo3CrK5steI+5L+jynwwIemvZyfXqaL0L5BzwJd7CSwNCU723DZaccvjuyOdoy3Xw== - dependencies: - react-remove-scroll-bar "^2.3.1" - react-style-singleton "^2.2.0" - tslib "^2.0.0" - use-callback-ref "^1.3.0" - use-sidecar "^1.1.2" - react-remove-scroll@^2.6.0: version "2.6.0" resolved "https://registry.yarnpkg.com/react-remove-scroll/-/react-remove-scroll-2.6.0.tgz#fb03a0845d7768a4f1519a99fdb84983b793dc07" @@ -9241,7 +9189,7 @@ react-remove-scroll@^2.6.0: use-callback-ref "^1.3.0" use-sidecar "^1.1.2" -react-router-dom@6, react-router-dom@<6.4.0: +react-router-dom@6: version "6.3.0" resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.3.0.tgz#a0216da813454e521905b5fa55e0e5176123f43d" integrity sha512-uaJj7LKytRxZNQV8+RbzJWnJ8K2nPsOOEuX7aQstlMZKQT0164C+X2w6bnkqU3sjtLvpd5ojrezAyfZ1+0sStw== @@ -9249,6 +9197,21 @@ react-router-dom@6, react-router-dom@<6.4.0: history "^5.2.0" react-router "6.3.0" +react-router-dom@^6.28.0: + version "6.28.2" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.28.2.tgz#9bc4f58b0cfe91d39d1a6be4beb0ef051ca9b06e" + integrity sha512-O81EWqNJWqvlN/a7eTudAdQm0TbI7hw+WIi7OwwMcTn5JMyZ0ibTFNGz+t+Lju0df4LcqowCegcrK22lB1q9Kw== + dependencies: + "@remix-run/router" "1.21.1" + react-router "6.28.2" + +react-router@6.28.2: + version "6.28.2" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.28.2.tgz#1ddea57c2de0d99e12d00af14d1499703f1378a9" + integrity 
sha512-BgFY7+wEGVjHCiqaj2XiUBQ1kkzfg6UoKYwEe0wv+FF+HNPCxtS/MVPvLAPH++EsuCMReZl9RYVGqcHLk5ms3A== + dependencies: + "@remix-run/router" "1.21.1" + react-router@6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.3.0.tgz#3970cc64b4cb4eae0c1ea5203a80334fdd175557" @@ -9256,7 +9219,7 @@ react-router@6.3.0: dependencies: history "^5.2.0" -react-scripts@^5.0.0, react-scripts@^5.0.1: +react-scripts@^5.0.0: version "5.0.1" resolved "https://registry.yarnpkg.com/react-scripts/-/react-scripts-5.0.1.tgz#6285dbd65a8ba6e49ca8d651ce30645a6d980003" integrity sha512-8VAmEm/ZAwQzJ+GOMLbBsTdDKOpuZh7RPs0UymvBR2vRk4iZWCskjbFnxqjrzoIvlNNRZ3QJFx6/qDSi6zSnaQ== @@ -9311,15 +9274,6 @@ react-scripts@^5.0.0, react-scripts@^5.0.1: optionalDependencies: fsevents "^2.3.2" -react-style-singleton@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/react-style-singleton/-/react-style-singleton-2.2.0.tgz#70f45f5fef97fdb9a52eed98d1839fa6b9032b22" - integrity sha512-nK7mN92DMYZEu3cQcAhfwE48NpzO5RpxjG4okbSqRRbfal9Pk+fG2RdQXTMp+f6all1hB9LIJSt+j7dCYrU11g== - dependencies: - get-nonce "^1.0.0" - invariant "^2.2.4" - tslib "^2.0.0" - react-style-singleton@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/react-style-singleton/-/react-style-singleton-2.2.1.tgz#f99e420492b2d8f34d38308ff660b60d0b1205b4" @@ -9583,28 +9537,6 @@ remark-parse-no-trim@^8.0.4: vfile-location "^3.0.0" xtend "^4.0.1" -remark-parse@^8.0.3: - version "8.0.3" - resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-8.0.3.tgz#9c62aa3b35b79a486454c690472906075f40c7e1" - integrity sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q== - dependencies: - ccount "^1.0.0" - collapse-white-space "^1.0.2" - is-alphabetical "^1.0.0" - is-decimal "^1.0.0" - is-whitespace-character "^1.0.0" - is-word-character "^1.0.0" - markdown-escapes "^1.0.0" - parse-entities "^2.0.0" - repeat-string "^1.5.4" - state-toggle "^1.0.0" - trim 
"0.0.1" - trim-trailing-lines "^1.0.0" - unherit "^1.0.4" - unist-util-remove-position "^2.0.0" - vfile-location "^3.0.0" - xtend "^4.0.1" - remark-rehype@^8.0.0, remark-rehype@^8.1.0: version "8.1.0" resolved "https://registry.yarnpkg.com/remark-rehype/-/remark-rehype-8.1.0.tgz#610509a043484c1e697437fa5eb3fd992617c945" @@ -9917,6 +9849,11 @@ serialize-query-params@^1.3.5: resolved "https://registry.yarnpkg.com/serialize-query-params/-/serialize-query-params-1.3.6.tgz#5dd5225db85ce747fe6fbc4897628504faafec6d" integrity sha512-VlH7sfWNyPVZClPkRacopn6sn5uQMXBsjPVz1+pBHX895VpcYVznfJtZ49e6jymcrz+l/vowkepCZn/7xEAEdw== +serialize-query-params@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/serialize-query-params/-/serialize-query-params-2.0.2.tgz#598a3fb9e13f4ea1c1992fbd20231aa16b31db81" + integrity sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q== + serve-index@^1.9.1: version "1.9.1" resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" @@ -10637,11 +10574,6 @@ trim-trailing-lines@^1.0.0: resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0" integrity sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ== -trim@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" - integrity sha1-WFhUf2spB1fulczMZm+1AITEYN0= - trough@^1.0.0: version "1.0.5" resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" @@ -10667,7 +10599,7 @@ tslib@2.6.2: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== -tslib@^1.0.0, tslib@^1.8.1, tslib@^1.9.3: +tslib@^1.8.1, 
tslib@^1.9.3: version "1.14.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== @@ -10967,6 +10899,13 @@ use-query-params@^1.2.3: dependencies: serialize-query-params "^1.3.5" +use-query-params@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/use-query-params/-/use-query-params-2.2.1.tgz#c558ab70706f319112fbccabf6867b9f904e947d" + integrity sha512-i6alcyLB8w9i3ZK3caNftdb+UnbfBRNPDnc89CNQWkGRmDrm/gfydHvMBfVsQJRq3NoHOM2dt/ceBWG2397v1Q== + dependencies: + serialize-query-params "^2.0.2" + use-sidecar@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/use-sidecar/-/use-sidecar-1.1.2.tgz#2f43126ba2d7d7e117aa5855e5d8f0276dfe73c2" diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py index 1d115920c3a..d852bb279cc 100644 --- a/sdk/python/feast/ui_server.py +++ b/sdk/python/feast/ui_server.py @@ -69,6 +69,8 @@ def shutdown_event(): @app.get("/registry") def read_registry(): + if registry_proto is None: + return Response(status_code=503) # Service Unavailable return Response( content=registry_proto.SerializeToString(), media_type="application/octet-stream", diff --git a/sdk/python/feast/utils.py b/sdk/python/feast/utils.py index 51d4bf4f2cc..4cca1379ed3 100644 --- a/sdk/python/feast/utils.py +++ b/sdk/python/feast/utils.py @@ -343,6 +343,22 @@ def _convert_arrow_odfv_to_proto( for column, value_type in columns if column in table.column_names } + + # Ensure join keys are included in proto_values_by_column, but check if they exist first + for join_key, value_type in join_keys.items(): + if join_key not in proto_values_by_column: + # Check if the join key exists in the table before trying to access it + if join_key in table.column_names: + proto_values_by_column[join_key] = python_values_to_proto_values( + table.column(join_key).to_numpy(zero_copy_only=False), 
value_type + ) + else: + # Create null/default values if the join key isn't in the table + null_column = [None] * table.num_rows + proto_values_by_column[join_key] = python_values_to_proto_values( + null_column, value_type + ) + # Adding On Demand Features for feature in feature_view.features: if ( @@ -357,7 +373,7 @@ def _convert_arrow_odfv_to_proto( updated_table = pyarrow.RecordBatch.from_arrays( table.columns + [null_column], schema=table.schema.append( - pyarrow.field(feature.name, null_column.type) + pyarrow.field(feature.name, null_column.type) # type: ignore[attr-defined] ), ) proto_values_by_column[feature.name] = python_values_to_proto_values( @@ -368,7 +384,11 @@ def _convert_arrow_odfv_to_proto( entity_keys = [ EntityKeyProto( join_keys=join_keys, - entity_values=[proto_values_by_column[k][idx] for k in join_keys], + entity_values=[ + proto_values_by_column[k][idx] + for k in join_keys + if k in proto_values_by_column + ], ) for idx in range(table.num_rows) ] @@ -378,6 +398,12 @@ def _convert_arrow_odfv_to_proto( feature.name: proto_values_by_column[feature.name] for feature in feature_view.features } + if feature_view.write_to_online_store: + table_columns = [col.name for col in table.schema] + for feature in feature_view.schema: + if feature.name not in feature_dict and feature.name in table_columns: + feature_dict[feature.name] = proto_values_by_column[feature.name] + features = [dict(zip(feature_dict, vars)) for vars in zip(*feature_dict.values())] # We need to artificially add event_timestamps and created_timestamps @@ -441,19 +467,24 @@ def _group_feature_refs( all_feature_views: List["FeatureView"], all_on_demand_feature_views: List["OnDemandFeatureView"], ) -> Tuple[ - List[Tuple["FeatureView", List[str]]], List[Tuple["OnDemandFeatureView", List[str]]] + List[Tuple[Union["FeatureView", "OnDemandFeatureView"], List[str]]], + List[Tuple["OnDemandFeatureView", List[str]]], ]: """Get list of feature views and corresponding feature names based on 
feature references""" # view name to view proto - view_index = {view.projection.name_to_use(): view for view in all_feature_views} + view_index: Dict[str, Union["FeatureView", "OnDemandFeatureView"]] = { + view.projection.name_to_use(): view for view in all_feature_views + } # on demand view to on demand view proto - on_demand_view_index = { - view.projection.name_to_use(): view - for view in all_on_demand_feature_views - if view.projection - } + on_demand_view_index: Dict[str, "OnDemandFeatureView"] = {} + for view in all_on_demand_feature_views: + if view.projection and not view.write_to_online_store: + on_demand_view_index[view.projection.name_to_use()] = view + elif view.projection and view.write_to_online_store: + # we insert the ODFV view to FVs for ones that are written to the online store + view_index[view.projection.name_to_use()] = view # view name to feature names views_features = defaultdict(set) @@ -464,7 +495,16 @@ def _group_feature_refs( for ref in features: view_name, feat_name = ref.split(":") if view_name in view_index: - view_index[view_name].projection.get_feature(feat_name) # For validation + if hasattr(view_index[view_name], "write_to_online_store"): + tmp_feat_name = [ + f for f in view_index[view_name].schema if f.name == feat_name + ] + if len(tmp_feat_name) > 0: + feat_name = tmp_feat_name[0].name + else: + view_index[view_name].projection.get_feature( + feat_name + ) # For validation views_features[view_name].add(feat_name) elif view_name in on_demand_view_index: on_demand_view_index[view_name].projection.get_feature( @@ -480,7 +520,7 @@ def _group_feature_refs( else: raise FeatureViewNotFoundException(view_name) - fvs_result: List[Tuple["FeatureView", List[str]]] = [] + fvs_result: List[Tuple[Union["FeatureView", "OnDemandFeatureView"], List[str]]] = [] odfvs_result: List[Tuple["OnDemandFeatureView", List[str]]] = [] for view_name, feature_names in views_features.items(): @@ -490,16 +530,28 @@ def _group_feature_refs( return fvs_result, 
odfvs_result -def apply_list_mapping( - lst: Iterable[Any], mapping_indexes: Iterable[List[int]] -) -> Iterable[Any]: - output_len = sum(len(item) for item in mapping_indexes) - output = [None] * output_len - for elem, destinations in zip(lst, mapping_indexes): - for idx in destinations: - output[idx] = elem +def construct_response_feature_vector( + values_vector: Iterable[Any], + statuses_vector: Iterable[Any], + timestamp_vector: Iterable[Any], + mapping_indexes: Iterable[List[int]], + output_len: int, +) -> GetOnlineFeaturesResponse.FeatureVector: + values_output: Iterable[Any] = [None] * output_len + statuses_output: Iterable[Any] = [None] * output_len + timestamp_output: Iterable[Any] = [None] * output_len - return output + for i, destinations in enumerate(mapping_indexes): + for idx in destinations: + values_output[idx] = values_vector[i] # type: ignore[index] + statuses_output[idx] = statuses_vector[i] # type: ignore[index] + timestamp_output[idx] = timestamp_vector[i] # type: ignore[index] + + return GetOnlineFeaturesResponse.FeatureVector( + values=values_output, + statuses=statuses_output, + event_timestamps=timestamp_output, + ) def _augment_response_with_on_demand_transforms( @@ -545,73 +597,74 @@ def _augment_response_with_on_demand_transforms( odfv_result_names = set() for odfv_name, _feature_refs in odfv_feature_refs.items(): odfv = requested_odfv_map[odfv_name] - if odfv.mode == "python": - if initial_response_dict is None: - initial_response_dict = initial_response.to_dict() - transformed_features_dict: Dict[str, List[Any]] = odfv.transform_dict( - initial_response_dict - ) - elif odfv.mode in {"pandas", "substrait"}: - if initial_response_arrow is None: - initial_response_arrow = initial_response.to_arrow() - transformed_features_arrow = odfv.transform_arrow( - initial_response_arrow, full_feature_names + if not odfv.write_to_online_store: + if odfv.mode == "python": + if initial_response_dict is None: + initial_response_dict = 
initial_response.to_dict() + transformed_features_dict: Dict[str, List[Any]] = odfv.transform_dict( + initial_response_dict + ) + elif odfv.mode in {"pandas", "substrait"}: + if initial_response_arrow is None: + initial_response_arrow = initial_response.to_arrow() + transformed_features_arrow = odfv.transform_arrow( + initial_response_arrow, full_feature_names + ) + else: + raise Exception( + f"Invalid OnDemandFeatureMode: {odfv.mode}. Expected one of 'pandas', 'python', or 'substrait'." + ) + + transformed_features = ( + transformed_features_dict + if odfv.mode == "python" + else transformed_features_arrow ) - else: - raise Exception( - f"Invalid OnDemandFeatureMode: {odfv.mode}. Expected one of 'pandas', 'python', or 'substrait'." + transformed_columns = ( + transformed_features.column_names + if isinstance(transformed_features, pyarrow.Table) + else transformed_features ) - - transformed_features = ( - transformed_features_dict - if odfv.mode == "python" - else transformed_features_arrow - ) - transformed_columns = ( - transformed_features.column_names - if isinstance(transformed_features, pyarrow.Table) - else transformed_features - ) - selected_subset = [f for f in transformed_columns if f in _feature_refs] - - proto_values = [] - schema_dict = {k.name: k.dtype for k in odfv.schema} - for selected_feature in selected_subset: - feature_vector = transformed_features[selected_feature] - selected_feature_type = schema_dict.get(selected_feature, None) - feature_type: ValueType = ValueType.UNKNOWN - if selected_feature_type is not None: - if isinstance( - selected_feature_type, (ComplexFeastType, PrimitiveFeastType) - ): - feature_type = selected_feature_type.to_value_type() - elif not isinstance(selected_feature_type, ValueType): - raise TypeError( - f"Unexpected type for feature_type: {type(feature_type)}" + selected_subset = [f for f in transformed_columns if f in _feature_refs] + + proto_values = [] + schema_dict = {k.name: k.dtype for k in odfv.schema} + for 
selected_feature in selected_subset: + feature_vector = transformed_features[selected_feature] + selected_feature_type = schema_dict.get(selected_feature, None) + feature_type: ValueType = ValueType.UNKNOWN + if selected_feature_type is not None: + if isinstance( + selected_feature_type, (ComplexFeastType, PrimitiveFeastType) + ): + feature_type = selected_feature_type.to_value_type() + elif not isinstance(selected_feature_type, ValueType): + raise TypeError( + f"Unexpected type for feature_type: {type(feature_type)}" + ) + + proto_values.append( + python_values_to_proto_values( + feature_vector + if isinstance(feature_vector, list) + else [feature_vector] + if odfv.mode == "python" + else feature_vector.to_numpy(), + feature_type, ) - - proto_values.append( - python_values_to_proto_values( - feature_vector - if isinstance(feature_vector, list) - else [feature_vector] - if odfv.mode == "python" - else feature_vector.to_numpy(), - feature_type, ) - ) - odfv_result_names |= set(selected_subset) + odfv_result_names |= set(selected_subset) - online_features_response.metadata.feature_names.val.extend(selected_subset) - for feature_idx in range(len(selected_subset)): - online_features_response.results.append( - GetOnlineFeaturesResponse.FeatureVector( - values=proto_values[feature_idx], - statuses=[FieldStatus.PRESENT] * len(proto_values[feature_idx]), - event_timestamps=[Timestamp()] * len(proto_values[feature_idx]), + online_features_response.metadata.feature_names.val.extend(selected_subset) + for feature_idx in range(len(selected_subset)): + online_features_response.results.append( + GetOnlineFeaturesResponse.FeatureVector( + values=proto_values[feature_idx], + statuses=[FieldStatus.PRESENT] * len(proto_values[feature_idx]), + event_timestamps=[Timestamp()] * len(proto_values[feature_idx]), + ) ) - ) def _get_entity_maps( @@ -674,7 +727,7 @@ def _get_unique_entities( table: "FeatureView", join_key_values: Dict[str, List[ValueProto]], entity_name_to_join_key_map: 
Dict[str, str], -) -> Tuple[Tuple[Dict[str, ValueProto], ...], Tuple[List[int], ...]]: +) -> Tuple[Tuple[Dict[str, ValueProto], ...], Tuple[List[int], ...], int]: """Return the set of unique composite Entities for a Feature View and the indexes at which they appear. This method allows us to query the OnlineStore for data we need only once @@ -687,8 +740,60 @@ def _get_unique_entities( entity_name_to_join_key_map, join_key_values, ) + # Validate that all expected join keys exist and have non-empty values. + expected_keys = set(entity_name_to_join_key_map.values()) + expected_keys.discard("__dummy_id") + missing_keys = sorted( + list(set([key for key in expected_keys if key not in table_entity_values])) + ) + empty_keys = sorted( + list(set([key for key in expected_keys if not table_entity_values.get(key)])) + ) + + if missing_keys or empty_keys: + if not any(table_entity_values.values()): + raise KeyError( + f"Missing join key values for keys: {missing_keys}. " + f"No values provided for keys: {empty_keys}. " + f"Provided join_key_values: {list(join_key_values.keys())}" + ) + + # Convert the column-oriented table_entity_values into row-wise data. + keys = list(table_entity_values.keys()) + # Each row is a tuple of ValueProto objects corresponding to the join keys. + rowise = list(enumerate(zip(*table_entity_values.values()))) + + # If there are no rows, return empty tuples. + if not rowise: + return (), (), 0 + + # Sort rowise so that rows with the same join key values are adjacent. + rowise.sort(key=lambda row: tuple(getattr(x, x.WhichOneof("val")) for x in row[1])) - # Convert back to rowise. + # Group rows by their composite join key value. + groups = [ + (dict(zip(keys, key_tuple)), [idx for idx, _ in group]) + for key_tuple, group in itertools.groupby(rowise, key=lambda row: row[1]) + ] + + # If no groups were formed (should not happen for valid input), return empty tuples. 
+ if not groups: + return (), (), 0 + + # Unpack the unique entities and their original row indexes. + unique_entities, indexes = tuple(zip(*groups)) + return unique_entities, indexes, len(rowise) + + +def _get_unique_entities_from_values( + table_entity_values: Dict[str, List[ValueProto]], +) -> Tuple[Tuple[Dict[str, ValueProto], ...], Tuple[List[int], ...], int]: + """Return the set of unique composite Entities for a Feature View and the indexes at which they appear. + + This method allows us to query the OnlineStore for data we need only once + rather than requesting and processing data for the same combination of + Entities multiple times. + """ keys = table_entity_values.keys() # Sort the rowise data to allow for grouping but keep original index. This lambda is # sufficient as Entity types cannot be complex (ie. lists). @@ -706,7 +811,7 @@ def _get_unique_entities( ] ) ) - return unique_entities, indexes + return unique_entities, indexes, len(rowise) def _drop_unneeded_columns( @@ -757,6 +862,7 @@ def get_needed_request_data( needed_request_data: Set[str] = set() for odfv, _ in grouped_odfv_refs: odfv_request_data_schema = odfv.get_request_data_schema() + # if odfv.write_to_online_store, we should not pass in the request data needed_request_data.update(odfv_request_data_schema.keys()) return needed_request_data @@ -783,6 +889,7 @@ def _populate_response_from_feature_data( full_feature_names: bool, requested_features: Iterable[str], table: "FeatureView", + output_len: int, ): """Populate the GetOnlineFeaturesResponse with feature data. @@ -801,33 +908,82 @@ def _populate_response_from_feature_data( requested_features: The names of the features in `feature_data`. This should be ordered in the same way as the data in `feature_data`. table: The FeatureView that `feature_data` was retrieved from. + output_len: The number of result rows in `online_features_response`. """ # Add the feature names to the response. 
+ table_name = table.projection.name_to_use() requested_feature_refs = [ - ( - f"{table.projection.name_to_use()}__{feature_name}" - if full_feature_names - else feature_name - ) + f"{table_name}__{feature_name}" if full_feature_names else feature_name for feature_name in requested_features ] online_features_response.metadata.feature_names.val.extend(requested_feature_refs) + # Process each feature vector in a single pass + for timestamp_vector, statuses_vector, values_vector in feature_data: + response_vector = construct_response_feature_vector( + values_vector, statuses_vector, timestamp_vector, indexes, output_len + ) + online_features_response.results.append(response_vector) + + +def _populate_response_from_feature_data_v2( + feature_data: Iterable[ + Tuple[ + Iterable[Timestamp], Iterable["FieldStatus.ValueType"], Iterable[ValueProto] + ] + ], + indexes: Iterable[List[int]], + online_features_response: GetOnlineFeaturesResponse, + requested_features: Iterable[str], + output_len: int, +): + """Populate the GetOnlineFeaturesResponse with feature data. + + This method assumes that `_read_from_online_store` returns data for each + combination of Entities in `entity_rows` in the same order as they + are provided. + + Args: + feature_data: A list of data in Protobuf form which was retrieved from the OnlineStore. + indexes: A list of indexes which should be the same length as `feature_data`. Each list + of indexes corresponds to a set of result rows in `online_features_response`. + online_features_response: The object to populate. + full_feature_names: A boolean that provides the option to add the feature view prefixes to the feature names, + changing them from the format "feature" to "feature_view__feature" (e.g., "daily_transactions" changes to + "customer_fv__daily_transactions"). + requested_features: The names of the features in `feature_data`. This should be ordered in the same way as the + data in `feature_data`. 
+ output_len: The number of result rows in `online_features_response`. + """ + # Add the feature names to the response. + requested_feature_refs = [(feature_name) for feature_name in requested_features] + online_features_response.metadata.feature_names.val.extend(requested_feature_refs) + timestamps, statuses, values = zip(*feature_data) # Populate the result with data fetched from the OnlineStore # which is guaranteed to be aligned with `requested_features`. - for ( - feature_idx, - (timestamp_vector, statuses_vector, values_vector), - ) in enumerate(zip(zip(*timestamps), zip(*statuses), zip(*values))): - online_features_response.results.append( - GetOnlineFeaturesResponse.FeatureVector( - values=apply_list_mapping(values_vector, indexes), - statuses=apply_list_mapping(statuses_vector, indexes), - event_timestamps=apply_list_mapping(timestamp_vector, indexes), - ) + for timestamp_vector, statuses_vector, values_vector in feature_data: + response_vector = construct_response_feature_vector( + values_vector, statuses_vector, timestamp_vector, indexes, output_len ) + online_features_response.results.append(response_vector) + + +def _convert_entity_key_to_proto_to_dict( + entity_key_vals: List[EntityKeyProto], +) -> Dict[str, List[ValueProto]]: + entity_dict: Dict[str, List[ValueProto]] = {} + for entity_key_val in entity_key_vals: + if entity_key_val is not None: + for join_key, entity_value in zip( + entity_key_val.join_keys, entity_key_val.entity_values + ): + if join_key not in entity_dict: + entity_dict[join_key] = [] + # python_entity_value = _proto_value_to_value_type(entity_value) + entity_dict[join_key].append(entity_value) + return entity_dict def _get_features( @@ -995,7 +1151,7 @@ def _get_online_request_context( entityless_case = DUMMY_ENTITY_NAME in [ entity_name for feature_view in feature_views - for entity_name in feature_view.entities + for entity_name in (feature_view.entities or []) ] return ( @@ -1058,7 +1214,13 @@ def 
_prepare_entities_to_read_from_online_store( odfv_entities: List[Entity] = [] request_source_keys: List[str] = [] for on_demand_feature_view in requested_on_demand_feature_views: - odfv_entities.append(*getattr(on_demand_feature_view, "entities", [])) + entities_for_odfv = getattr(on_demand_feature_view, "entities", []) + if len(entities_for_odfv) > 0 and isinstance(entities_for_odfv[0], str): + entities_for_odfv = [ + registry.get_entity(entity_name, project, allow_cache=True) + for entity_name in entities_for_odfv + ] + odfv_entities.extend(entities_for_odfv) for source in on_demand_feature_view.source_request_sources: source_schema = on_demand_feature_view.source_request_sources[source].schema for column in source_schema: @@ -1130,33 +1292,32 @@ def _convert_rows_to_protobuf( requested_features: List[str], read_rows: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]], ) -> List[Tuple[List[Timestamp], List["FieldStatus.ValueType"], List[ValueProto]]]: - # Each row is a set of features for a given entity key. - # We only need to convert the data to Protobuf once. + # Pre-calculate the length to avoid repeated calculations + n_rows = len(read_rows) + + # Create single instances of commonly used values null_value = ValueProto() - read_row_protos = [] - for read_row in read_rows: - row_ts_proto = Timestamp() - row_ts, feature_data = read_row - # TODO (Ly): reuse whatever timestamp if row_ts is None? - if row_ts is not None: - row_ts_proto.FromDatetime(row_ts) - event_timestamps = [row_ts_proto] * len(requested_features) - if feature_data is None: - statuses = [FieldStatus.NOT_FOUND] * len(requested_features) - values = [null_value] * len(requested_features) - else: - statuses = [] - values = [] - for feature_name in requested_features: - # Make sure order of data is the same as requested_features. 
- if feature_name not in feature_data: - statuses.append(FieldStatus.NOT_FOUND) - values.append(null_value) - else: - statuses.append(FieldStatus.PRESENT) - values.append(feature_data[feature_name]) - read_row_protos.append((event_timestamps, statuses, values)) - return read_row_protos + null_status = FieldStatus.NOT_FOUND + null_timestamp = Timestamp() + present_status = FieldStatus.PRESENT + + requested_features_vectors = [] + for feature_name in requested_features: + ts_vector = [null_timestamp] * n_rows + status_vector = [null_status] * n_rows + value_vector = [null_value] * n_rows + for idx, read_row in enumerate(read_rows): + row_ts_proto = Timestamp() + row_ts, feature_data = read_row + # TODO (Ly): reuse whatever timestamp if row_ts is None? + if row_ts is not None: + row_ts_proto.FromDatetime(row_ts) + ts_vector[idx] = row_ts_proto + if (feature_data is not None) and (feature_name in feature_data): + status_vector[idx] = present_status + value_vector[idx] = feature_data[feature_name] + requested_features_vectors.append((ts_vector, status_vector, value_vector)) + return requested_features_vectors def has_all_tags( @@ -1192,6 +1353,10 @@ def _utc_now() -> datetime: return datetime.now(tz=timezone.utc) +def _serialize_vector_to_float_list(vector: List[float]) -> ValueProto: + return ValueProto(float_list_val=FloatListProto(val=vector)) + + def _build_retrieve_online_document_record( entity_key: Union[str, bytes], feature_value: Union[str, bytes], diff --git a/sdk/python/pyproject.toml b/sdk/python/pyproject.toml index 10ad007fa90..8a1c5b70c3b 100644 --- a/sdk/python/pyproject.toml +++ b/sdk/python/pyproject.toml @@ -6,7 +6,7 @@ select = ["E","F","W","I"] ignore = ["E203", "E266", "E501", "E721"] [tool.ruff.lint.isort] -known-first-party = ["feast", "feast", "feast_serving_server", "feast_core_server"] +known-first-party = ["feast", "feast_serving_server", "feast_core_server"] default-section = "third-party" [tool.mypy] diff --git a/sdk/python/pytest.ini 
b/sdk/python/pytest.ini index a0736767601..d79459c0d0e 100644 --- a/sdk/python/pytest.ini +++ b/sdk/python/pytest.ini @@ -4,6 +4,7 @@ asyncio_mode = auto markers = universal_offline_stores: mark a test as using all offline stores. universal_online_stores: mark a test as using all online stores. + rbac_remote_integration_test: mark a integration test related to rbac and remote functionality. env = IS_TEST=True diff --git a/sdk/python/requirements/py3.10-ci-requirements.txt b/sdk/python/requirements/py3.10-ci-requirements.txt index 9ac4937582b..239d6b4de30 100644 --- a/sdk/python/requirements/py3.10-ci-requirements.txt +++ b/sdk/python/requirements/py3.10-ci-requirements.txt @@ -1,14 +1,14 @@ # This file was autogenerated by uv via the following command: # uv pip compile -p 3.10 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.10-ci-requirements.txt -aiobotocore==2.15.2 +aiobotocore==2.20.0 # via feast (setup.py) -aiohappyeyeballs==2.4.3 +aiohappyeyeballs==2.4.6 # via aiohttp -aiohttp==3.10.10 +aiohttp==3.11.13 # via aiobotocore aioitertools==0.12.0 # via aiobotocore -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp alabaster==0.7.16 # via sphinx @@ -16,7 +16,7 @@ altair==4.2.2 # via great-expectations annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # httpx # jupyter-server @@ -25,7 +25,9 @@ anyio==4.6.2.post1 appnope==0.1.4 # via ipykernel argon2-cffi==23.1.0 - # via jupyter-server + # via + # jupyter-server + # minio argon2-cffi-bindings==21.2.0 # via argon2-cffi arrow==1.3.0 @@ -34,46 +36,50 @@ asn1crypto==1.5.1 # via snowflake-connector-python assertpy==1.1 # via feast (setup.py) -asttokens==2.4.1 +asttokens==3.0.0 # via stack-data async-lru==2.0.4 # via jupyterlab async-property==0.2.2 # via python-keycloak -async-timeout==4.0.3 +async-timeout==5.0.1 # via # aiohttp # redis -atpublic==5.0 +atpublic==5.1 # via ibis-framework -attrs==24.2.0 +attrs==25.1.0 # via # aiohttp + # jsonlines # 
jsonschema # referencing -azure-core==1.31.0 +azure-core==1.32.0 # via # azure-identity # azure-storage-blob -azure-identity==1.19.0 +azure-identity==1.20.0 # via feast (setup.py) -azure-storage-blob==12.23.1 +azure-storage-blob==12.24.1 # via feast (setup.py) -babel==2.16.0 +babel==2.17.0 # via # jupyterlab-server # sphinx -beautifulsoup4==4.12.3 - # via nbconvert -bigtree==0.21.3 +beautifulsoup4==4.13.3 + # via + # docling + # nbconvert +bigtree==0.25.0 # via feast (setup.py) -bleach==6.1.0 +bleach[css]==6.2.0 # via nbconvert -boto3==1.35.36 +boto3==1.36.23 # via # feast (setup.py) + # ikvpy # moto -botocore==1.35.36 +botocore==1.36.23 # via # aiobotocore # boto3 @@ -84,12 +90,13 @@ build==1.2.2.post1 # feast (setup.py) # pip-tools # singlestoredb -cachetools==5.5.0 +cachetools==5.5.2 # via google-auth cassandra-driver==3.29.2 # via feast (setup.py) -certifi==2024.8.30 +certifi==2025.1.31 # via + # docling # elastic-transport # httpcore # httpx @@ -99,24 +106,27 @@ certifi==2024.8.30 # snowflake-connector-python cffi==1.17.1 # via + # feast (setup.py) # argon2-cffi-bindings # cryptography + # ikvpy # snowflake-connector-python cfgv==3.4.0 # via pre-commit -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via # requests # snowflake-connector-python -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # geomet # great-expectations # pip-tools + # typer # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via @@ -128,9 +138,11 @@ comm==0.2.2 # ipywidgets couchbase==4.3.2 # via feast (setup.py) -coverage[toml]==7.6.4 +couchbase-columnar==1.0.0 + # via feast (setup.py) +coverage[toml]==7.6.12 # via pytest-cov -cryptography==42.0.8 +cryptography==43.0.3 # via # feast (setup.py) # azure-identity @@ -144,42 +156,57 @@ cryptography==42.0.8 # snowflake-connector-python # types-pyopenssl # types-redis -cython==3.0.11 +cython==3.0.12 # via thriftpy2 -dask[dataframe]==2024.10.0 - # via - # feast (setup.py) - # dask-expr 
-dask-expr==1.1.16 - # via dask -db-dtypes==1.3.0 +dask[dataframe]==2025.2.0 + # via feast (setup.py) +db-dtypes==1.4.1 # via google-cloud-bigquery -debugpy==1.8.7 +debugpy==1.8.12 # via ipykernel -decorator==5.1.1 +decorator==5.2.1 # via ipython defusedxml==0.7.1 # via nbconvert -deltalake==0.20.2 +deltalake==0.25.1 # via feast (setup.py) deprecation==2.1.0 # via python-keycloak dill==0.3.9 - # via feast (setup.py) + # via + # feast (setup.py) + # multiprocess distlib==0.3.9 # via virtualenv docker==7.1.0 # via testcontainers +docling==2.24.0 + # via feast (setup.py) +docling-core[chunking]==2.20.0 + # via + # docling + # docling-ibm-models + # docling-parse +docling-ibm-models==3.4.0 + # via docling +docling-parse==3.4.0 + # via docling docutils==0.19 # via sphinx -duckdb==1.1.2 +duckdb==1.1.3 # via ibis-framework -elastic-transport==8.15.1 +easyocr==1.7.2 + # via docling +elastic-transport==8.17.0 # via elasticsearch -elasticsearch==8.15.1 +elasticsearch==8.17.1 # via feast (setup.py) entrypoints==0.4 # via altair +environs==9.5.0 + # via pymilvus +et-xmlfile==2.0.0 + # via openpyxl exceptiongroup==1.2.2 # via # anyio @@ -187,18 +214,23 @@ exceptiongroup==1.2.2 # pytest execnet==2.1.1 # via pytest-xdist -executing==2.1.0 +executing==2.2.0 # via stack-data -faiss-cpu==1.9.0 +faiss-cpu==1.10.0 # via feast (setup.py) -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fastjsonschema==2.20.0 +fastjsonschema==2.21.1 # via nbformat -filelock==3.16.1 +filelock==3.17.0 # via + # huggingface-hub # snowflake-connector-python + # torch + # transformers # virtualenv +filetype==1.2.0 + # via docling fqdn==1.5.1 # via jsonschema frozenlist==1.5.0 @@ -209,9 +241,11 @@ fsspec==2024.9.0 # via # feast (setup.py) # dask + # huggingface-hub + # torch geomet==0.2.1.post1 # via cassandra-driver -google-api-core[grpc]==2.22.0 +google-api-core[grpc]==2.24.1 # via # feast (setup.py) # google-cloud-bigquery @@ -220,7 +254,7 @@ google-api-core[grpc]==2.22.0 # google-cloud-core # 
google-cloud-datastore # google-cloud-storage -google-auth==2.35.0 +google-auth==2.38.0 # via # google-api-core # google-cloud-bigquery @@ -230,21 +264,21 @@ google-auth==2.35.0 # google-cloud-datastore # google-cloud-storage # kubernetes -google-cloud-bigquery[pandas]==3.26.0 +google-cloud-bigquery[pandas]==3.29.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.27.0 +google-cloud-bigquery-storage==2.28.0 # via feast (setup.py) -google-cloud-bigtable==2.26.0 +google-cloud-bigtable==2.28.1 # via feast (setup.py) -google-cloud-core==2.4.1 +google-cloud-core==2.4.2 # via # google-cloud-bigquery # google-cloud-bigtable # google-cloud-datastore # google-cloud-storage -google-cloud-datastore==2.20.1 +google-cloud-datastore==2.20.2 # via feast (setup.py) -google-cloud-storage==2.18.2 +google-cloud-storage==2.19.0 # via feast (setup.py) google-crc32c==1.6.0 # via @@ -254,7 +288,7 @@ google-resumable-media==2.7.2 # via # google-cloud-bigquery # google-cloud-storage -googleapis-common-protos[grpc]==1.65.0 +googleapis-common-protos[grpc]==1.68.0 # via # feast (setup.py) # google-api-core @@ -262,9 +296,11 @@ googleapis-common-protos[grpc]==1.65.0 # grpcio-status great-expectations==0.18.22 # via feast (setup.py) -grpc-google-iam-v1==0.13.1 +greenlet==3.1.1 + # via sqlalchemy +grpc-google-iam-v1==0.14.0 # via google-cloud-bigtable -grpcio==1.67.0 +grpcio==1.70.0 # via # feast (setup.py) # google-api-core @@ -275,16 +311,20 @@ grpcio==1.67.0 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy + # pymilvus # qdrant-client -grpcio-health-checking==1.62.3 +grpcio-health-checking==1.70.0 # via feast (setup.py) -grpcio-reflection==1.62.3 +grpcio-reflection==1.70.0 # via feast (setup.py) -grpcio-status==1.62.3 - # via google-api-core -grpcio-testing==1.62.3 +grpcio-status==1.70.0 + # via + # google-api-core + # ikvpy +grpcio-testing==1.70.0 # via feast (setup.py) -grpcio-tools==1.62.3 +grpcio-tools==1.70.0 # via # feast (setup.py) # qdrant-client @@ -296,7 +336,7 @@ 
h11==0.14.0 # via # httpcore # uvicorn -h2==4.1.0 +h2==4.2.0 # via httpx happybase==1.2.0 # via feast (setup.py) @@ -304,9 +344,9 @@ hazelcast-python-client==5.5.0 # via feast (setup.py) hiredis==2.4.0 # via feast (setup.py) -hpack==4.0.0 +hpack==4.1.0 # via h2 -httpcore==1.0.6 +httpcore==1.0.7 # via httpx httptools==0.6.4 # via uvicorn @@ -316,15 +356,21 @@ httpx[http2]==0.27.2 # jupyterlab # python-keycloak # qdrant-client -hyperframe==6.0.1 +huggingface-hub==0.29.1 + # via + # docling + # docling-ibm-models + # tokenizers + # transformers +hyperframe==6.1.0 # via h2 -ibis-framework[duckdb]==9.5.0 +ibis-framework[duckdb, mssql]==9.5.0 # via # feast (setup.py) # ibis-substrait ibis-substrait==4.0.1 # via feast (setup.py) -identify==2.6.1 +identify==2.6.8 # via pre-commit idna==3.10 # via @@ -334,9 +380,13 @@ idna==3.10 # requests # snowflake-connector-python # yarl +ikvpy==0.0.36 + # via feast (setup.py) +imageio==2.37.0 + # via scikit-image imagesize==1.4.1 # via sphinx -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via # build # dask @@ -344,7 +394,7 @@ iniconfig==2.0.0 # via pytest ipykernel==6.29.5 # via jupyterlab -ipython==8.29.0 +ipython==8.32.0 # via # great-expectations # ipykernel @@ -355,9 +405,9 @@ isodate==0.7.2 # via azure-storage-blob isoduration==20.11.0 # via jsonschema -jedi==0.19.1 +jedi==0.19.2 # via ipython -jinja2==3.1.4 +jinja2==3.1.5 # via # feast (setup.py) # altair @@ -368,22 +418,29 @@ jinja2==3.1.4 # moto # nbconvert # sphinx + # torch jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -json5==0.9.25 +json5==0.10.0 # via jupyterlab-server +jsonlines==3.1.0 + # via docling-ibm-models jsonpatch==1.33 # via great-expectations jsonpointer==3.0.0 # via # jsonpatch # jsonschema +jsonref==1.1.0 + # via docling-core jsonschema[format-nongpl]==4.23.0 # via # feast (setup.py) # altair + # docling-core # great-expectations # jupyter-events # jupyterlab-server @@ -404,11 +461,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # 
nbformat -jupyter-events==0.10.0 +jupyter-events==0.12.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.2 +jupyter-server==2.15.0 # via # jupyter-lsp # jupyterlab @@ -417,7 +474,7 @@ jupyter-server==2.14.2 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.5 +jupyterlab==4.3.5 # via notebook jupyterlab-pygments==0.3.0 # via nbconvert @@ -431,38 +488,59 @@ jwcrypto==1.5.6 # via python-keycloak kubernetes==20.13.0 # via feast (setup.py) +latex2mathml==3.77.0 + # via docling-core +lazy-loader==0.4 + # via scikit-image locket==1.0.0 # via partd +lxml==5.3.1 + # via + # docling + # python-docx + # python-pptx +lz4==4.4.3 + # via trino makefun==1.15.6 # via great-expectations markdown-it-py==3.0.0 # via rich +marko==2.1.2 + # via docling markupsafe==3.0.2 # via # jinja2 # nbconvert # werkzeug -marshmallow==3.23.0 - # via great-expectations +marshmallow==3.26.1 + # via + # environs + # great-expectations matplotlib-inline==0.1.7 # via # ipykernel # ipython mdurl==0.1.2 # via markdown-it-py -minio==7.1.0 +milvus-lite==2.4.11 + # via pymilvus +minio==7.2.11 # via feast (setup.py) -mistune==3.0.2 +mistune==3.1.2 # via # great-expectations # nbconvert -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) mock==2.0.0 # via feast (setup.py) moto==4.2.14 # via feast (setup.py) -msal==1.31.0 +mpire[dill]==2.10.2 + # via semchunk +mpmath==1.3.0 + # via sympy +msal==1.31.1 # via # azure-identity # msal-extensions @@ -470,8 +548,11 @@ msal-extensions==1.2.0 # via azure-identity multidict==6.1.0 # via + # aiobotocore # aiohttp # yarl +multiprocess==0.70.17 + # via mpire mypy==1.11.2 # via # feast (setup.py) @@ -480,9 +561,9 @@ mypy-extensions==1.0.0 # via mypy mypy-protobuf==3.3.0 # via feast (setup.py) -nbclient==0.10.0 +nbclient==0.10.2 # via nbconvert -nbconvert==7.16.4 +nbconvert==7.16.6 # via jupyter-server nbformat==5.10.4 # via @@ -492,9 +573,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel 
+networkx==3.4.2 + # via + # scikit-image + # torch +ninja==1.11.1.3 + # via easyocr nodeenv==1.9.1 # via pre-commit -notebook==7.2.2 +notebook==7.3.2 # via great-expectations notebook-shim==0.2.4 # via @@ -506,18 +593,34 @@ numpy==1.26.4 # altair # dask # db-dtypes + # docling-ibm-models + # easyocr # faiss-cpu # great-expectations # ibis-framework + # imageio + # opencv-python-headless # pandas # pyarrow # qdrant-client + # safetensors + # scikit-image # scipy + # shapely + # tifffile + # torchvision + # transformers oauthlib==3.2.2 # via requests-oauthlib +opencv-python-headless==4.11.0.86 + # via + # docling-ibm-models + # easyocr +openpyxl==3.1.5 + # via docling overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -527,27 +630,34 @@ packaging==24.1 # google-cloud-bigquery # great-expectations # gunicorn + # huggingface-hub # ibis-framework # ibis-substrait # ipykernel + # jupyter-events # jupyter-server # jupyterlab # jupyterlab-server + # lazy-loader # marshmallow # nbconvert # pytest + # scikit-image # snowflake-connector-python # sphinx + # transformers pandas==2.2.3 # via # feast (setup.py) # altair # dask - # dask-expr # db-dtypes + # docling + # docling-core # google-cloud-bigquery # great-expectations # ibis-framework + # pymilvus # snowflake-connector-python pandocfilters==1.5.1 # via nbconvert @@ -559,11 +669,22 @@ parsy==2.1 # via ibis-framework partd==1.4.2 # via dask -pbr==6.1.0 +pbr==6.1.1 # via mock pexpect==4.9.0 # via ipython -pip==24.3.1 +pillow==11.1.0 + # via + # docling + # docling-core + # docling-ibm-models + # docling-parse + # easyocr + # imageio + # python-pptx + # scikit-image + # torchvision +pip==25.0.1 # via pip-tools pip-tools==7.4.1 # via feast (setup.py) @@ -582,21 +703,23 @@ portalocker==2.10.1 # qdrant-client pre-commit==3.3.1 # via feast (setup.py) -prometheus-client==0.21.0 +prometheus-client==0.21.1 # via # feast (setup.py) # jupyter-server -prompt-toolkit==3.0.48 
+prompt-toolkit==3.0.50 # via ipython -propcache==0.2.0 - # via yarl -proto-plus==1.25.0 +propcache==0.3.0 + # via + # aiohttp + # yarl +proto-plus==1.26.0 # via # google-api-core # google-cloud-bigquery-storage # google-cloud-bigtable # google-cloud-datastore -protobuf==4.25.5 +protobuf==5.29.3 # via # feast (setup.py) # google-api-core @@ -610,18 +733,20 @@ protobuf==4.25.5 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy # mypy-protobuf # proto-plus + # pymilvus # substrait psutil==5.9.0 # via # feast (setup.py) # ipykernel -psycopg[binary, pool]==3.2.3 +psycopg[binary, pool]==3.2.5 # via feast (setup.py) -psycopg-binary==3.2.3 +psycopg-binary==3.2.5 # via psycopg -psycopg-pool==3.2.3 +psycopg-pool==3.2.5 # via psycopg ptyprocess==0.7.0 # via @@ -638,7 +763,7 @@ py4j==0.10.9.7 pyarrow==17.0.0 # via # feast (setup.py) - # dask-expr + # dask # db-dtypes # deltalake # google-cloud-bigquery @@ -654,44 +779,62 @@ pyasn1-modules==0.4.1 # via google-auth pybindgen==0.22.1 # via feast (setup.py) +pyclipper==1.3.0.post6 + # via easyocr pycparser==2.22 # via cffi -pydantic==2.9.2 +pycryptodome==3.21.0 + # via minio +pydantic==2.10.6 # via # feast (setup.py) + # docling + # docling-core + # docling-ibm-models + # docling-parse # fastapi # great-expectations + # pydantic-settings # qdrant-client -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pydantic-settings==2.8.0 + # via docling +pygments==2.19.1 # via # feast (setup.py) # ipython + # mpire # nbconvert # rich # sphinx -pyjwt[crypto]==2.9.0 +pyjwt[crypto]==2.10.1 # via # feast (setup.py) # msal # singlestoredb # snowflake-connector-python -pymssql==2.3.1 +pymilvus==2.4.9 + # via feast (setup.py) +pymssql==2.3.2 # via feast (setup.py) pymysql==1.1.1 # via feast (setup.py) pyodbc==5.2.0 - # via feast (setup.py) -pyopenssl==24.2.1 + # via + # feast (setup.py) + # ibis-framework +pyopenssl==24.3.0 # via snowflake-connector-python -pyparsing==3.2.0 +pyparsing==3.2.1 # via 
great-expectations +pypdfium2==4.30.1 + # via docling pyproject-hooks==1.2.0 # via # build # pip-tools -pyspark==3.5.3 +pyspark==3.5.4 # via feast (setup.py) pytest==7.4.4 # via @@ -709,7 +852,7 @@ pytest-asyncio==0.23.8 # via feast (setup.py) pytest-benchmark==3.4.1 # via feast (setup.py) -pytest-cov==5.0.0 +pytest-cov==6.0.0 # via feast (setup.py) pytest-env==1.1.3 # via feast (setup.py) @@ -723,8 +866,11 @@ pytest-timeout==1.4.2 # via feast (setup.py) pytest-xdist==3.6.1 # via feast (setup.py) +python-bidi==0.6.6 + # via easyocr python-dateutil==2.9.0.post0 # via + # aiobotocore # arrow # botocore # google-cloud-bigquery @@ -735,13 +881,20 @@ python-dateutil==2.9.0.post0 # moto # pandas # trino +python-docx==1.1.2 + # via docling python-dotenv==1.0.1 - # via uvicorn -python-json-logger==2.0.7 + # via + # environs + # pydantic-settings + # uvicorn +python-json-logger==3.2.1 # via jupyter-events python-keycloak==4.2.2 # via feast (setup.py) -pytz==2024.2 +python-pptx==1.0.2 + # via docling +pytz==2025.1 # via # great-expectations # ibis-framework @@ -752,39 +905,46 @@ pyyaml==6.0.2 # via # feast (setup.py) # dask + # docling-core + # easyocr + # huggingface-hub # ibis-substrait # jupyter-events # kubernetes # pre-commit # responses + # transformers # uvicorn -pyzmq==26.2.0 +pyzmq==26.2.1 # via # ipykernel # jupyter-client # jupyter-server -qdrant-client==1.12.0 +qdrant-client==1.13.2 # via feast (setup.py) redis==4.6.0 # via feast (setup.py) -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications # jupyter-events -regex==2024.9.11 +regex==2024.11.6 # via # feast (setup.py) # parsimonious + # transformers requests==2.32.3 # via # feast (setup.py) # azure-core # docker + # docling # google-api-core # google-cloud-bigquery # google-cloud-storage # great-expectations + # huggingface-hub # jupyterlab-server # kubernetes # moto @@ -796,12 +956,13 @@ requests==2.32.3 # singlestoredb # snowflake-connector-python # sphinx + # transformers # 
trino requests-oauthlib==2.0.0 # via kubernetes requests-toolbelt==1.0.0 # via python-keycloak -responses==0.25.3 +responses==0.25.6 # via moto rfc3339-validator==0.1.4 # via @@ -811,40 +972,60 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.9.3 - # via ibis-framework -rpds-py==0.20.0 +rich==13.9.4 + # via + # ibis-framework + # typer +rpds-py==0.23.1 # via # jsonschema # referencing rsa==4.9 # via google-auth +rtree==1.3.0 + # via docling ruamel-yaml==0.17.40 # via great-expectations ruamel-yaml-clib==0.2.12 # via ruamel-yaml -ruff==0.7.1 +ruff==0.9.7 # via feast (setup.py) -s3transfer==0.10.3 +s3transfer==0.11.2 # via boto3 -scipy==1.14.1 - # via great-expectations +safetensors[torch]==0.5.2 + # via + # docling-ibm-models + # transformers +scikit-image==0.25.2 + # via easyocr +scipy==1.15.2 + # via + # docling + # easyocr + # great-expectations + # scikit-image +semchunk==2.2.2 + # via docling-core send2trash==1.8.3 # via jupyter-server -setuptools==75.2.0 +setuptools==75.8.0 # via # grpcio-tools # jupyterlab # kubernetes + # pbr # pip-tools + # pymilvus # singlestoredb +shapely==2.0.7 + # via easyocr +shellingham==1.5.4 + # via typer singlestoredb==1.7.2 # via feast (setup.py) -six==1.16.0 +six==1.17.0 # via - # asttokens # azure-core - # bleach # geomet # happybase # kubernetes @@ -858,7 +1039,7 @@ sniffio==1.3.1 # httpx snowballstemmer==2.2.0 # via sphinx -snowflake-connector-python[pandas]==3.12.3 +snowflake-connector-python[pandas]==3.13.2 # via feast (setup.py) sortedcontainers==2.4.0 # via snowflake-connector-python @@ -878,37 +1059,46 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) sqlglot==25.20.2 # via ibis-framework -sqlite-vec==0.1.1 +sqlite-vec==0.1.6 # via feast (setup.py) -sqlparams==6.1.0 +sqlparams==6.2.0 # via singlestoredb stack-data==0.6.3 # via ipython -starlette==0.41.2 +starlette==0.45.3 # via 
fastapi substrait==0.23.0 # via ibis-substrait +sympy==1.13.3 + # via torch tabulate==0.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # docling-core + # docling-parse tenacity==8.5.0 # via feast (setup.py) terminado==0.18.1 # via # jupyter-server # jupyter-server-terminals -testcontainers==4.4.0 +testcontainers==4.8.2 # via feast (setup.py) thriftpy2==0.5.2 # via happybase +tifffile==2025.2.18 + # via scikit-image tinycss2==1.4.0 - # via nbconvert + # via bleach +tokenizers==0.19.1 + # via transformers toml==0.10.2 # via feast (setup.py) -tomli==2.0.2 +tomli==2.2.1 # via # build # coverage @@ -926,7 +1116,19 @@ toolz==0.12.1 # dask # ibis-framework # partd -tornado==6.4.1 +torch==2.2.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr + # safetensors + # torchvision +torchvision==0.17.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr +tornado==6.4.2 # via # ipykernel # jupyter-client @@ -934,10 +1136,17 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tqdm==4.66.6 +tqdm==4.67.1 # via # feast (setup.py) + # docling + # docling-ibm-models # great-expectations + # huggingface-hub + # milvus-lite + # mpire + # semchunk + # transformers traitlets==5.14.3 # via # comm @@ -953,37 +1162,45 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat -trino==0.330.0 +transformers==4.42.4 + # via + # docling-core + # docling-ibm-models +trino==0.333.0 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) -types-cffi==1.16.0.20240331 +typer==0.12.5 + # via + # docling + # docling-core +types-cffi==1.16.0.20241221 # via types-pyopenssl types-protobuf==3.19.22 # via # feast (setup.py) # mypy-protobuf -types-pymysql==1.1.0.20240524 +types-pymysql==1.1.0.20241103 # via feast (setup.py) types-pyopenssl==24.1.0.20240722 # via types-redis -types-python-dateutil==2.9.0.20241003 +types-python-dateutil==2.9.0.20241206 # via # feast (setup.py) # arrow -types-pytz==2024.2.0.20241003 +types-pytz==2025.1.0.20250204 # 
via feast (setup.py) -types-pyyaml==6.0.12.20240917 +types-pyyaml==6.0.12.20241230 # via feast (setup.py) types-redis==4.6.0.20241004 # via feast (setup.py) types-requests==2.30.0.0 # via feast (setup.py) -types-setuptools==75.2.0.20241025 +types-setuptools==75.8.0.20250225 # via # feast (setup.py) # types-cffi -types-tabulate==0.9.0.20240106 +types-tabulate==0.9.0.20241207 # via feast (setup.py) types-urllib3==1.26.25.14 # via types-requests @@ -994,34 +1211,47 @@ typing-extensions==4.12.2 # azure-core # azure-identity # azure-storage-blob + # beautifulsoup4 + # docling-core # fastapi # great-expectations + # huggingface-hub # ibis-framework # ipython # jwcrypto + # minio + # mistune # multidict # mypy # psycopg # psycopg-pool # pydantic # pydantic-core + # python-docx + # python-pptx + # referencing # rich # snowflake-connector-python # sqlalchemy # testcontainers + # torch # typeguard + # typer # uvicorn -tzdata==2024.2 +tzdata==2025.1 # via pandas -tzlocal==5.2 +tzlocal==5.3 # via # great-expectations # trino +ujson==5.10.0 + # via pymilvus uri-template==1.3.0 # via jsonschema -urllib3==2.2.3 +urllib3==2.3.0 # via # feast (setup.py) + # aiobotocore # botocore # docker # elastic-transport @@ -1032,11 +1262,11 @@ urllib3==2.2.3 # requests # responses # testcontainers -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn @@ -1044,11 +1274,11 @@ virtualenv==20.23.0 # via # feast (setup.py) # pre-commit -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.8.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1058,23 +1288,27 @@ websocket-client==1.8.0 # via # jupyter-server # kubernetes -websockets==13.1 +websockets==15.0 # via uvicorn -werkzeug==3.0.6 +werkzeug==3.1.3 # via moto -wheel==0.44.0 +wheel==0.45.1 # via # pip-tools # singlestoredb 
widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.2 # via # aiobotocore # testcontainers +xlsxwriter==3.2.2 + # via python-pptx xmltodict==0.14.2 # via moto -yarl==1.16.0 +yarl==1.18.3 # via aiohttp -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata +zstandard==0.23.0 + # via trino diff --git a/sdk/python/requirements/py3.10-requirements.txt b/sdk/python/requirements/py3.10-requirements.txt index dd2ed6951c9..ea4baadec05 100644 --- a/sdk/python/requirements/py3.10-requirements.txt +++ b/sdk/python/requirements/py3.10-requirements.txt @@ -2,43 +2,41 @@ # uv pip compile -p 3.10 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.10-requirements.txt annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # starlette # watchfiles -attrs==24.2.0 +attrs==25.1.0 # via # jsonschema # referencing -bigtree==0.21.3 +bigtree==0.25.0 # via feast (setup.py) -certifi==2024.8.30 +certifi==2025.1.31 # via requests -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via requests -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via feast (setup.py) -dask[dataframe]==2024.10.0 - # via - # feast (setup.py) - # dask-expr -dask-expr==1.1.16 - # via dask +dask[dataframe]==2025.2.0 + # via feast (setup.py) dill==0.3.9 # via feast (setup.py) exceptiongroup==1.2.2 # via anyio -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fsspec==2024.10.0 +fsspec==2025.2.0 # via dask +greenlet==3.1.1 + # via sqlalchemy gunicorn==23.0.0 # via # feast (setup.py) @@ -51,9 +49,9 @@ idna==3.10 # via # anyio # requests -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via dask -jinja2==3.1.4 +jinja2==3.1.5 # via feast (setup.py) jsonschema==4.23.0 # via feast (setup.py) @@ -63,9 +61,9 @@ locket==1.0.0 # via partd markupsafe==3.0.2 # via jinja2 -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) -mypy==1.13.0 +mypy==1.15.0 # via 
sqlalchemy mypy-extensions==1.0.0 # via mypy @@ -74,7 +72,7 @@ numpy==1.26.4 # feast (setup.py) # dask # pandas -packaging==24.1 +packaging==24.2 # via # dask # gunicorn @@ -82,57 +80,56 @@ pandas==2.2.3 # via # feast (setup.py) # dask - # dask-expr partd==1.4.2 # via dask -prometheus-client==0.21.0 +prometheus-client==0.21.1 # via feast (setup.py) -protobuf==4.25.5 +protobuf==5.29.3 # via feast (setup.py) -psutil==6.1.0 +psutil==7.0.0 # via feast (setup.py) pyarrow==18.0.0 # via # feast (setup.py) - # dask-expr -pydantic==2.9.2 + # dask +pydantic==2.10.6 # via # feast (setup.py) # fastapi -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pygments==2.19.1 # via feast (setup.py) -pyjwt==2.9.0 +pyjwt==2.10.1 # via feast (setup.py) python-dateutil==2.9.0.post0 # via pandas python-dotenv==1.0.1 # via uvicorn -pytz==2024.2 +pytz==2025.1 # via pandas pyyaml==6.0.2 # via # feast (setup.py) # dask # uvicorn -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications requests==2.32.3 # via feast (setup.py) -rpds-py==0.20.0 +rpds-py==0.23.1 # via # jsonschema # referencing -six==1.16.0 +six==1.17.0 # via python-dateutil sniffio==1.3.1 # via anyio -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) -starlette==0.41.2 +starlette==0.45.3 # via fastapi tabulate==0.9.0 # via feast (setup.py) @@ -140,15 +137,15 @@ tenacity==8.5.0 # via feast (setup.py) toml==0.10.2 # via feast (setup.py) -tomli==2.0.2 +tomli==2.2.1 # via mypy toolz==1.0.0 # via # dask # partd -tqdm==4.66.6 +tqdm==4.67.1 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) typing-extensions==4.12.2 # via @@ -157,24 +154,25 @@ typing-extensions==4.12.2 # mypy # pydantic # pydantic-core + # referencing # sqlalchemy # typeguard # uvicorn -tzdata==2024.2 +tzdata==2025.1 # via pandas -urllib3==2.2.3 +urllib3==2.3.0 # via requests -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # 
uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn -websockets==13.1 +websockets==15.0 # via uvicorn -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata diff --git a/sdk/python/requirements/py3.11-ci-requirements.txt b/sdk/python/requirements/py3.11-ci-requirements.txt index feaafa36e3f..af6b5b469c9 100644 --- a/sdk/python/requirements/py3.11-ci-requirements.txt +++ b/sdk/python/requirements/py3.11-ci-requirements.txt @@ -1,14 +1,14 @@ # This file was autogenerated by uv via the following command: # uv pip compile -p 3.11 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.11-ci-requirements.txt -aiobotocore==2.15.2 +aiobotocore==2.20.0 # via feast (setup.py) -aiohappyeyeballs==2.4.3 +aiohappyeyeballs==2.4.6 # via aiohttp -aiohttp==3.10.10 +aiohttp==3.11.13 # via aiobotocore aioitertools==0.12.0 # via aiobotocore -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp alabaster==0.7.16 # via sphinx @@ -16,7 +16,7 @@ altair==4.2.2 # via great-expectations annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # httpx # jupyter-server @@ -25,7 +25,9 @@ anyio==4.6.2.post1 appnope==0.1.4 # via ipykernel argon2-cffi==23.1.0 - # via jupyter-server + # via + # jupyter-server + # minio argon2-cffi-bindings==21.2.0 # via argon2-cffi arrow==1.3.0 @@ -34,44 +36,48 @@ asn1crypto==1.5.1 # via snowflake-connector-python assertpy==1.1 # via feast (setup.py) -asttokens==2.4.1 +asttokens==3.0.0 # via stack-data async-lru==2.0.4 # via jupyterlab async-property==0.2.2 # via python-keycloak -async-timeout==4.0.3 +async-timeout==5.0.1 # via redis -atpublic==5.0 +atpublic==5.1 # via ibis-framework -attrs==24.2.0 +attrs==25.1.0 # via # aiohttp + # jsonlines # jsonschema # referencing -azure-core==1.31.0 +azure-core==1.32.0 # via # azure-identity # azure-storage-blob -azure-identity==1.19.0 +azure-identity==1.20.0 # via 
feast (setup.py) -azure-storage-blob==12.23.1 +azure-storage-blob==12.24.1 # via feast (setup.py) -babel==2.16.0 +babel==2.17.0 # via # jupyterlab-server # sphinx -beautifulsoup4==4.12.3 - # via nbconvert -bigtree==0.21.3 +beautifulsoup4==4.13.3 + # via + # docling + # nbconvert +bigtree==0.25.0 # via feast (setup.py) -bleach==6.1.0 +bleach[css]==6.2.0 # via nbconvert -boto3==1.35.36 +boto3==1.36.23 # via # feast (setup.py) + # ikvpy # moto -botocore==1.35.36 +botocore==1.36.23 # via # aiobotocore # boto3 @@ -82,12 +88,13 @@ build==1.2.2.post1 # feast (setup.py) # pip-tools # singlestoredb -cachetools==5.5.0 +cachetools==5.5.2 # via google-auth cassandra-driver==3.29.2 # via feast (setup.py) -certifi==2024.8.30 +certifi==2025.1.31 # via + # docling # elastic-transport # httpcore # httpx @@ -97,24 +104,27 @@ certifi==2024.8.30 # snowflake-connector-python cffi==1.17.1 # via + # feast (setup.py) # argon2-cffi-bindings # cryptography + # ikvpy # snowflake-connector-python cfgv==3.4.0 # via pre-commit -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via # requests # snowflake-connector-python -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # geomet # great-expectations # pip-tools + # typer # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via @@ -126,9 +136,11 @@ comm==0.2.2 # ipywidgets couchbase==4.3.2 # via feast (setup.py) -coverage[toml]==7.6.4 +couchbase-columnar==1.0.0 + # via feast (setup.py) +coverage[toml]==7.6.12 # via pytest-cov -cryptography==42.0.8 +cryptography==43.0.3 # via # feast (setup.py) # azure-identity @@ -142,56 +154,76 @@ cryptography==42.0.8 # snowflake-connector-python # types-pyopenssl # types-redis -cython==3.0.11 +cython==3.0.12 # via thriftpy2 -dask[dataframe]==2024.10.0 - # via - # feast (setup.py) - # dask-expr -dask-expr==1.1.16 - # via dask -db-dtypes==1.3.0 +dask[dataframe]==2025.2.0 + # via feast (setup.py) +db-dtypes==1.4.1 # via google-cloud-bigquery -debugpy==1.8.7 +debugpy==1.8.12 
# via ipykernel -decorator==5.1.1 +decorator==5.2.1 # via ipython defusedxml==0.7.1 # via nbconvert -deltalake==0.20.2 +deltalake==0.25.1 # via feast (setup.py) deprecation==2.1.0 # via python-keycloak dill==0.3.9 - # via feast (setup.py) + # via + # feast (setup.py) + # multiprocess distlib==0.3.9 # via virtualenv docker==7.1.0 # via testcontainers +docling==2.24.0 + # via feast (setup.py) +docling-core[chunking]==2.20.0 + # via + # docling + # docling-ibm-models + # docling-parse +docling-ibm-models==3.4.0 + # via docling +docling-parse==3.4.0 + # via docling docutils==0.19 # via sphinx -duckdb==1.1.2 +duckdb==1.1.3 # via ibis-framework -elastic-transport==8.15.1 +easyocr==1.7.2 + # via docling +elastic-transport==8.17.0 # via elasticsearch -elasticsearch==8.15.1 +elasticsearch==8.17.1 # via feast (setup.py) entrypoints==0.4 # via altair +environs==9.5.0 + # via pymilvus +et-xmlfile==2.0.0 + # via openpyxl execnet==2.1.1 # via pytest-xdist -executing==2.1.0 +executing==2.2.0 # via stack-data -faiss-cpu==1.9.0 +faiss-cpu==1.10.0 # via feast (setup.py) -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fastjsonschema==2.20.0 +fastjsonschema==2.21.1 # via nbformat -filelock==3.16.1 +filelock==3.17.0 # via + # huggingface-hub # snowflake-connector-python + # torch + # transformers # virtualenv +filetype==1.2.0 + # via docling fqdn==1.5.1 # via jsonschema frozenlist==1.5.0 @@ -202,9 +234,11 @@ fsspec==2024.9.0 # via # feast (setup.py) # dask + # huggingface-hub + # torch geomet==0.2.1.post1 # via cassandra-driver -google-api-core[grpc]==2.22.0 +google-api-core[grpc]==2.24.1 # via # feast (setup.py) # google-cloud-bigquery @@ -213,7 +247,7 @@ google-api-core[grpc]==2.22.0 # google-cloud-core # google-cloud-datastore # google-cloud-storage -google-auth==2.35.0 +google-auth==2.38.0 # via # google-api-core # google-cloud-bigquery @@ -223,21 +257,21 @@ google-auth==2.35.0 # google-cloud-datastore # google-cloud-storage # kubernetes 
-google-cloud-bigquery[pandas]==3.26.0 +google-cloud-bigquery[pandas]==3.29.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.27.0 +google-cloud-bigquery-storage==2.28.0 # via feast (setup.py) -google-cloud-bigtable==2.26.0 +google-cloud-bigtable==2.28.1 # via feast (setup.py) -google-cloud-core==2.4.1 +google-cloud-core==2.4.2 # via # google-cloud-bigquery # google-cloud-bigtable # google-cloud-datastore # google-cloud-storage -google-cloud-datastore==2.20.1 +google-cloud-datastore==2.20.2 # via feast (setup.py) -google-cloud-storage==2.18.2 +google-cloud-storage==2.19.0 # via feast (setup.py) google-crc32c==1.6.0 # via @@ -247,7 +281,7 @@ google-resumable-media==2.7.2 # via # google-cloud-bigquery # google-cloud-storage -googleapis-common-protos[grpc]==1.65.0 +googleapis-common-protos[grpc]==1.68.0 # via # feast (setup.py) # google-api-core @@ -255,9 +289,11 @@ googleapis-common-protos[grpc]==1.65.0 # grpcio-status great-expectations==0.18.22 # via feast (setup.py) -grpc-google-iam-v1==0.13.1 +greenlet==3.1.1 + # via sqlalchemy +grpc-google-iam-v1==0.14.0 # via google-cloud-bigtable -grpcio==1.67.0 +grpcio==1.70.0 # via # feast (setup.py) # google-api-core @@ -268,16 +304,20 @@ grpcio==1.67.0 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy + # pymilvus # qdrant-client -grpcio-health-checking==1.62.3 +grpcio-health-checking==1.70.0 # via feast (setup.py) -grpcio-reflection==1.62.3 +grpcio-reflection==1.70.0 # via feast (setup.py) -grpcio-status==1.62.3 - # via google-api-core -grpcio-testing==1.62.3 +grpcio-status==1.70.0 + # via + # google-api-core + # ikvpy +grpcio-testing==1.70.0 # via feast (setup.py) -grpcio-tools==1.62.3 +grpcio-tools==1.70.0 # via # feast (setup.py) # qdrant-client @@ -289,7 +329,7 @@ h11==0.14.0 # via # httpcore # uvicorn -h2==4.1.0 +h2==4.2.0 # via httpx happybase==1.2.0 # via feast (setup.py) @@ -297,9 +337,9 @@ hazelcast-python-client==5.5.0 # via feast (setup.py) hiredis==2.4.0 # via feast (setup.py) -hpack==4.0.0 
+hpack==4.1.0 # via h2 -httpcore==1.0.6 +httpcore==1.0.7 # via httpx httptools==0.6.4 # via uvicorn @@ -309,15 +349,21 @@ httpx[http2]==0.27.2 # jupyterlab # python-keycloak # qdrant-client -hyperframe==6.0.1 +huggingface-hub==0.29.1 + # via + # docling + # docling-ibm-models + # tokenizers + # transformers +hyperframe==6.1.0 # via h2 -ibis-framework[duckdb]==9.5.0 +ibis-framework[duckdb, mssql]==9.5.0 # via # feast (setup.py) # ibis-substrait ibis-substrait==4.0.1 # via feast (setup.py) -identify==2.6.1 +identify==2.6.8 # via pre-commit idna==3.10 # via @@ -327,15 +373,19 @@ idna==3.10 # requests # snowflake-connector-python # yarl +ikvpy==0.0.36 + # via feast (setup.py) +imageio==2.37.0 + # via scikit-image imagesize==1.4.1 # via sphinx -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via dask iniconfig==2.0.0 # via pytest ipykernel==6.29.5 # via jupyterlab -ipython==8.29.0 +ipython==8.32.0 # via # great-expectations # ipykernel @@ -346,9 +396,9 @@ isodate==0.7.2 # via azure-storage-blob isoduration==20.11.0 # via jsonschema -jedi==0.19.1 +jedi==0.19.2 # via ipython -jinja2==3.1.4 +jinja2==3.1.5 # via # feast (setup.py) # altair @@ -359,22 +409,29 @@ jinja2==3.1.4 # moto # nbconvert # sphinx + # torch jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -json5==0.9.25 +json5==0.10.0 # via jupyterlab-server +jsonlines==3.1.0 + # via docling-ibm-models jsonpatch==1.33 # via great-expectations jsonpointer==3.0.0 # via # jsonpatch # jsonschema +jsonref==1.1.0 + # via docling-core jsonschema[format-nongpl]==4.23.0 # via # feast (setup.py) # altair + # docling-core # great-expectations # jupyter-events # jupyterlab-server @@ -395,11 +452,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat -jupyter-events==0.10.0 +jupyter-events==0.12.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.2 +jupyter-server==2.15.0 # via # jupyter-lsp # jupyterlab @@ -408,7 +465,7 @@ jupyter-server==2.14.2 # notebook-shim 
jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.5 +jupyterlab==4.3.5 # via notebook jupyterlab-pygments==0.3.0 # via nbconvert @@ -422,38 +479,59 @@ jwcrypto==1.5.6 # via python-keycloak kubernetes==20.13.0 # via feast (setup.py) +latex2mathml==3.77.0 + # via docling-core +lazy-loader==0.4 + # via scikit-image locket==1.0.0 # via partd +lxml==5.3.1 + # via + # docling + # python-docx + # python-pptx +lz4==4.4.3 + # via trino makefun==1.15.6 # via great-expectations markdown-it-py==3.0.0 # via rich +marko==2.1.2 + # via docling markupsafe==3.0.2 # via # jinja2 # nbconvert # werkzeug -marshmallow==3.23.0 - # via great-expectations +marshmallow==3.26.1 + # via + # environs + # great-expectations matplotlib-inline==0.1.7 # via # ipykernel # ipython mdurl==0.1.2 # via markdown-it-py -minio==7.1.0 +milvus-lite==2.4.11 + # via pymilvus +minio==7.2.11 # via feast (setup.py) -mistune==3.0.2 +mistune==3.1.2 # via # great-expectations # nbconvert -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) mock==2.0.0 # via feast (setup.py) moto==4.2.14 # via feast (setup.py) -msal==1.31.0 +mpire[dill]==2.10.2 + # via semchunk +mpmath==1.3.0 + # via sympy +msal==1.31.1 # via # azure-identity # msal-extensions @@ -461,8 +539,11 @@ msal-extensions==1.2.0 # via azure-identity multidict==6.1.0 # via + # aiobotocore # aiohttp # yarl +multiprocess==0.70.17 + # via mpire mypy==1.11.2 # via # feast (setup.py) @@ -471,9 +552,9 @@ mypy-extensions==1.0.0 # via mypy mypy-protobuf==3.3.0 # via feast (setup.py) -nbclient==0.10.0 +nbclient==0.10.2 # via nbconvert -nbconvert==7.16.4 +nbconvert==7.16.6 # via jupyter-server nbformat==5.10.4 # via @@ -483,9 +564,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel +networkx==3.4.2 + # via + # scikit-image + # torch +ninja==1.11.1.3 + # via easyocr nodeenv==1.9.1 # via pre-commit -notebook==7.2.2 +notebook==7.3.2 # via great-expectations notebook-shim==0.2.4 # via @@ -497,18 +584,34 @@ numpy==1.26.4 # altair # dask # 
db-dtypes + # docling-ibm-models + # easyocr # faiss-cpu # great-expectations # ibis-framework + # imageio + # opencv-python-headless # pandas # pyarrow # qdrant-client + # safetensors + # scikit-image # scipy + # shapely + # tifffile + # torchvision + # transformers oauthlib==3.2.2 # via requests-oauthlib +opencv-python-headless==4.11.0.86 + # via + # docling-ibm-models + # easyocr +openpyxl==3.1.5 + # via docling overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -518,27 +621,34 @@ packaging==24.1 # google-cloud-bigquery # great-expectations # gunicorn + # huggingface-hub # ibis-framework # ibis-substrait # ipykernel + # jupyter-events # jupyter-server # jupyterlab # jupyterlab-server + # lazy-loader # marshmallow # nbconvert # pytest + # scikit-image # snowflake-connector-python # sphinx + # transformers pandas==2.2.3 # via # feast (setup.py) # altair # dask - # dask-expr # db-dtypes + # docling + # docling-core # google-cloud-bigquery # great-expectations # ibis-framework + # pymilvus # snowflake-connector-python pandocfilters==1.5.1 # via nbconvert @@ -550,11 +660,22 @@ parsy==2.1 # via ibis-framework partd==1.4.2 # via dask -pbr==6.1.0 +pbr==6.1.1 # via mock pexpect==4.9.0 # via ipython -pip==24.3.1 +pillow==11.1.0 + # via + # docling + # docling-core + # docling-ibm-models + # docling-parse + # easyocr + # imageio + # python-pptx + # scikit-image + # torchvision +pip==25.0.1 # via pip-tools pip-tools==7.4.1 # via feast (setup.py) @@ -573,21 +694,23 @@ portalocker==2.10.1 # qdrant-client pre-commit==3.3.1 # via feast (setup.py) -prometheus-client==0.21.0 +prometheus-client==0.21.1 # via # feast (setup.py) # jupyter-server -prompt-toolkit==3.0.48 +prompt-toolkit==3.0.50 # via ipython -propcache==0.2.0 - # via yarl -proto-plus==1.25.0 +propcache==0.3.0 + # via + # aiohttp + # yarl +proto-plus==1.26.0 # via # google-api-core # google-cloud-bigquery-storage # google-cloud-bigtable # google-cloud-datastore 
-protobuf==4.25.5 +protobuf==5.29.3 # via # feast (setup.py) # google-api-core @@ -601,18 +724,20 @@ protobuf==4.25.5 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy # mypy-protobuf # proto-plus + # pymilvus # substrait psutil==5.9.0 # via # feast (setup.py) # ipykernel -psycopg[binary, pool]==3.2.3 +psycopg[binary, pool]==3.2.5 # via feast (setup.py) -psycopg-binary==3.2.3 +psycopg-binary==3.2.5 # via psycopg -psycopg-pool==3.2.3 +psycopg-pool==3.2.5 # via psycopg ptyprocess==0.7.0 # via @@ -629,7 +754,7 @@ py4j==0.10.9.7 pyarrow==17.0.0 # via # feast (setup.py) - # dask-expr + # dask # db-dtypes # deltalake # google-cloud-bigquery @@ -645,44 +770,62 @@ pyasn1-modules==0.4.1 # via google-auth pybindgen==0.22.1 # via feast (setup.py) +pyclipper==1.3.0.post6 + # via easyocr pycparser==2.22 # via cffi -pydantic==2.9.2 +pycryptodome==3.21.0 + # via minio +pydantic==2.10.6 # via # feast (setup.py) + # docling + # docling-core + # docling-ibm-models + # docling-parse # fastapi # great-expectations + # pydantic-settings # qdrant-client -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pydantic-settings==2.8.0 + # via docling +pygments==2.19.1 # via # feast (setup.py) # ipython + # mpire # nbconvert # rich # sphinx -pyjwt[crypto]==2.9.0 +pyjwt[crypto]==2.10.1 # via # feast (setup.py) # msal # singlestoredb # snowflake-connector-python -pymssql==2.3.1 +pymilvus==2.4.9 + # via feast (setup.py) +pymssql==2.3.2 # via feast (setup.py) pymysql==1.1.1 # via feast (setup.py) pyodbc==5.2.0 - # via feast (setup.py) -pyopenssl==24.2.1 + # via + # feast (setup.py) + # ibis-framework +pyopenssl==24.3.0 # via snowflake-connector-python -pyparsing==3.2.0 +pyparsing==3.2.1 # via great-expectations +pypdfium2==4.30.1 + # via docling pyproject-hooks==1.2.0 # via # build # pip-tools -pyspark==3.5.3 +pyspark==3.5.4 # via feast (setup.py) pytest==7.4.4 # via @@ -700,7 +843,7 @@ pytest-asyncio==0.23.8 # via feast (setup.py) pytest-benchmark==3.4.1 # via 
feast (setup.py) -pytest-cov==5.0.0 +pytest-cov==6.0.0 # via feast (setup.py) pytest-env==1.1.3 # via feast (setup.py) @@ -714,8 +857,11 @@ pytest-timeout==1.4.2 # via feast (setup.py) pytest-xdist==3.6.1 # via feast (setup.py) +python-bidi==0.6.6 + # via easyocr python-dateutil==2.9.0.post0 # via + # aiobotocore # arrow # botocore # google-cloud-bigquery @@ -726,13 +872,20 @@ python-dateutil==2.9.0.post0 # moto # pandas # trino +python-docx==1.1.2 + # via docling python-dotenv==1.0.1 - # via uvicorn -python-json-logger==2.0.7 + # via + # environs + # pydantic-settings + # uvicorn +python-json-logger==3.2.1 # via jupyter-events python-keycloak==4.2.2 # via feast (setup.py) -pytz==2024.2 +python-pptx==1.0.2 + # via docling +pytz==2025.1 # via # great-expectations # ibis-framework @@ -743,39 +896,46 @@ pyyaml==6.0.2 # via # feast (setup.py) # dask + # docling-core + # easyocr + # huggingface-hub # ibis-substrait # jupyter-events # kubernetes # pre-commit # responses + # transformers # uvicorn -pyzmq==26.2.0 +pyzmq==26.2.1 # via # ipykernel # jupyter-client # jupyter-server -qdrant-client==1.12.0 +qdrant-client==1.13.2 # via feast (setup.py) redis==4.6.0 # via feast (setup.py) -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications # jupyter-events -regex==2024.9.11 +regex==2024.11.6 # via # feast (setup.py) # parsimonious + # transformers requests==2.32.3 # via # feast (setup.py) # azure-core # docker + # docling # google-api-core # google-cloud-bigquery # google-cloud-storage # great-expectations + # huggingface-hub # jupyterlab-server # kubernetes # moto @@ -787,12 +947,13 @@ requests==2.32.3 # singlestoredb # snowflake-connector-python # sphinx + # transformers # trino requests-oauthlib==2.0.0 # via kubernetes requests-toolbelt==1.0.0 # via python-keycloak -responses==0.25.3 +responses==0.25.6 # via moto rfc3339-validator==0.1.4 # via @@ -802,40 +963,60 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.9.3 - 
# via ibis-framework -rpds-py==0.20.0 +rich==13.9.4 + # via + # ibis-framework + # typer +rpds-py==0.23.1 # via # jsonschema # referencing rsa==4.9 # via google-auth +rtree==1.3.0 + # via docling ruamel-yaml==0.17.40 # via great-expectations ruamel-yaml-clib==0.2.12 # via ruamel-yaml -ruff==0.7.1 +ruff==0.9.7 # via feast (setup.py) -s3transfer==0.10.3 +s3transfer==0.11.2 # via boto3 -scipy==1.14.1 - # via great-expectations +safetensors[torch]==0.5.2 + # via + # docling-ibm-models + # transformers +scikit-image==0.25.2 + # via easyocr +scipy==1.15.2 + # via + # docling + # easyocr + # great-expectations + # scikit-image +semchunk==2.2.2 + # via docling-core send2trash==1.8.3 # via jupyter-server -setuptools==75.2.0 +setuptools==75.8.0 # via # grpcio-tools # jupyterlab # kubernetes + # pbr # pip-tools + # pymilvus # singlestoredb +shapely==2.0.7 + # via easyocr +shellingham==1.5.4 + # via typer singlestoredb==1.7.2 # via feast (setup.py) -six==1.16.0 +six==1.17.0 # via - # asttokens # azure-core - # bleach # geomet # happybase # kubernetes @@ -849,7 +1030,7 @@ sniffio==1.3.1 # httpx snowballstemmer==2.2.0 # via sphinx -snowflake-connector-python[pandas]==3.12.3 +snowflake-connector-python[pandas]==3.13.2 # via feast (setup.py) sortedcontainers==2.4.0 # via snowflake-connector-python @@ -869,36 +1050,47 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) sqlglot==25.20.2 # via ibis-framework -sqlite-vec==0.1.1 +sqlite-vec==0.1.6 # via feast (setup.py) -sqlparams==6.1.0 +sqlparams==6.2.0 # via singlestoredb stack-data==0.6.3 # via ipython -starlette==0.41.2 +starlette==0.45.3 # via fastapi substrait==0.23.0 # via ibis-substrait +sympy==1.13.3 + # via torch tabulate==0.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # docling-core + # docling-parse tenacity==8.5.0 # via feast (setup.py) terminado==0.18.1 # via # jupyter-server # 
jupyter-server-terminals -testcontainers==4.4.0 +testcontainers==4.8.2 # via feast (setup.py) thriftpy2==0.5.2 # via happybase +tifffile==2025.2.18 + # via scikit-image tinycss2==1.4.0 - # via nbconvert + # via bleach +tokenizers==0.19.1 + # via transformers toml==0.10.2 # via feast (setup.py) +tomli==2.2.1 + # via coverage tomlkit==0.13.2 # via snowflake-connector-python toolz==0.12.1 @@ -907,7 +1099,19 @@ toolz==0.12.1 # dask # ibis-framework # partd -tornado==6.4.1 +torch==2.2.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr + # safetensors + # torchvision +torchvision==0.17.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr +tornado==6.4.2 # via # ipykernel # jupyter-client @@ -915,10 +1119,17 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tqdm==4.66.6 +tqdm==4.67.1 # via # feast (setup.py) + # docling + # docling-ibm-models # great-expectations + # huggingface-hub + # milvus-lite + # mpire + # semchunk + # transformers traitlets==5.14.3 # via # comm @@ -934,70 +1145,91 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat -trino==0.330.0 +transformers==4.42.4 + # via + # docling-core + # docling-ibm-models +trino==0.333.0 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) -types-cffi==1.16.0.20240331 +typer==0.12.5 + # via + # docling + # docling-core +types-cffi==1.16.0.20241221 # via types-pyopenssl types-protobuf==3.19.22 # via # feast (setup.py) # mypy-protobuf -types-pymysql==1.1.0.20240524 +types-pymysql==1.1.0.20241103 # via feast (setup.py) types-pyopenssl==24.1.0.20240722 # via types-redis -types-python-dateutil==2.9.0.20241003 +types-python-dateutil==2.9.0.20241206 # via # feast (setup.py) # arrow -types-pytz==2024.2.0.20241003 +types-pytz==2025.1.0.20250204 # via feast (setup.py) -types-pyyaml==6.0.12.20240917 +types-pyyaml==6.0.12.20241230 # via feast (setup.py) types-redis==4.6.0.20241004 # via feast (setup.py) types-requests==2.30.0.0 # via feast (setup.py) 
-types-setuptools==75.2.0.20241025 +types-setuptools==75.8.0.20250225 # via # feast (setup.py) # types-cffi -types-tabulate==0.9.0.20240106 +types-tabulate==0.9.0.20241207 # via feast (setup.py) types-urllib3==1.26.25.14 # via types-requests typing-extensions==4.12.2 # via + # anyio # azure-core # azure-identity # azure-storage-blob + # beautifulsoup4 + # docling-core # fastapi # great-expectations + # huggingface-hub # ibis-framework # ipython # jwcrypto + # minio # mypy # psycopg # psycopg-pool # pydantic # pydantic-core + # python-docx + # python-pptx + # referencing # snowflake-connector-python # sqlalchemy # testcontainers + # torch # typeguard -tzdata==2024.2 + # typer +tzdata==2025.1 # via pandas -tzlocal==5.2 +tzlocal==5.3 # via # great-expectations # trino +ujson==5.10.0 + # via pymilvus uri-template==1.3.0 # via jsonschema -urllib3==2.2.3 +urllib3==2.3.0 # via # feast (setup.py) + # aiobotocore # botocore # docker # elastic-transport @@ -1008,11 +1240,11 @@ urllib3==2.2.3 # requests # responses # testcontainers -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn @@ -1020,11 +1252,11 @@ virtualenv==20.23.0 # via # feast (setup.py) # pre-commit -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.8.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1034,23 +1266,27 @@ websocket-client==1.8.0 # via # jupyter-server # kubernetes -websockets==13.1 +websockets==15.0 # via uvicorn -werkzeug==3.0.6 +werkzeug==3.1.3 # via moto -wheel==0.44.0 +wheel==0.45.1 # via # pip-tools # singlestoredb widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.2 # via # aiobotocore # testcontainers +xlsxwriter==3.2.2 + # via python-pptx xmltodict==0.14.2 # via moto -yarl==1.16.0 +yarl==1.18.3 # via aiohttp -zipp==3.20.2 +zipp==3.21.0 # via 
importlib-metadata +zstandard==0.23.0 + # via trino diff --git a/sdk/python/requirements/py3.11-requirements.txt b/sdk/python/requirements/py3.11-requirements.txt index c9833ca07b0..d33da6d75c2 100644 --- a/sdk/python/requirements/py3.11-requirements.txt +++ b/sdk/python/requirements/py3.11-requirements.txt @@ -2,41 +2,39 @@ # uv pip compile -p 3.11 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.11-requirements.txt annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # starlette # watchfiles -attrs==24.2.0 +attrs==25.1.0 # via # jsonschema # referencing -bigtree==0.21.3 +bigtree==0.25.0 # via feast (setup.py) -certifi==2024.8.30 +certifi==2025.1.31 # via requests -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via requests -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via feast (setup.py) -dask[dataframe]==2024.10.0 - # via - # feast (setup.py) - # dask-expr -dask-expr==1.1.16 - # via dask +dask[dataframe]==2025.2.0 + # via feast (setup.py) dill==0.3.9 # via feast (setup.py) -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fsspec==2024.10.0 +fsspec==2025.2.0 # via dask +greenlet==3.1.1 + # via sqlalchemy gunicorn==23.0.0 # via # feast (setup.py) @@ -49,9 +47,9 @@ idna==3.10 # via # anyio # requests -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via dask -jinja2==3.1.4 +jinja2==3.1.5 # via feast (setup.py) jsonschema==4.23.0 # via feast (setup.py) @@ -61,9 +59,9 @@ locket==1.0.0 # via partd markupsafe==3.0.2 # via jinja2 -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) -mypy==1.13.0 +mypy==1.15.0 # via sqlalchemy mypy-extensions==1.0.0 # via mypy @@ -72,7 +70,7 @@ numpy==1.26.4 # feast (setup.py) # dask # pandas -packaging==24.1 +packaging==24.2 # via # dask # gunicorn @@ -80,57 +78,56 @@ pandas==2.2.3 # via # feast (setup.py) # dask - # dask-expr partd==1.4.2 # via dask 
-prometheus-client==0.21.0 +prometheus-client==0.21.1 # via feast (setup.py) -protobuf==4.25.5 +protobuf==5.29.3 # via feast (setup.py) -psutil==6.1.0 +psutil==7.0.0 # via feast (setup.py) pyarrow==18.0.0 # via # feast (setup.py) - # dask-expr -pydantic==2.9.2 + # dask +pydantic==2.10.6 # via # feast (setup.py) # fastapi -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pygments==2.19.1 # via feast (setup.py) -pyjwt==2.9.0 +pyjwt==2.10.1 # via feast (setup.py) python-dateutil==2.9.0.post0 # via pandas python-dotenv==1.0.1 # via uvicorn -pytz==2024.2 +pytz==2025.1 # via pandas pyyaml==6.0.2 # via # feast (setup.py) # dask # uvicorn -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications requests==2.32.3 # via feast (setup.py) -rpds-py==0.20.0 +rpds-py==0.23.1 # via # jsonschema # referencing -six==1.16.0 +six==1.17.0 # via python-dateutil sniffio==1.3.1 # via anyio -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) -starlette==0.41.2 +starlette==0.45.3 # via fastapi tabulate==0.9.0 # via feast (setup.py) @@ -142,33 +139,35 @@ toolz==1.0.0 # via # dask # partd -tqdm==4.66.6 +tqdm==4.67.1 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) typing-extensions==4.12.2 # via + # anyio # fastapi # mypy # pydantic # pydantic-core + # referencing # sqlalchemy # typeguard -tzdata==2024.2 +tzdata==2025.1 # via pandas -urllib3==2.2.3 +urllib3==2.3.0 # via requests -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn -websockets==13.1 +websockets==15.0 # via uvicorn -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata diff --git a/sdk/python/requirements/py3.9-ci-requirements.txt b/sdk/python/requirements/py3.9-ci-requirements.txt index 30eab84822e..4473d933258 100644 --- 
a/sdk/python/requirements/py3.9-ci-requirements.txt +++ b/sdk/python/requirements/py3.9-ci-requirements.txt @@ -1,14 +1,14 @@ # This file was autogenerated by uv via the following command: # uv pip compile -p 3.9 --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.9-ci-requirements.txt -aiobotocore==2.15.2 +aiobotocore==2.20.0 # via feast (setup.py) -aiohappyeyeballs==2.4.3 +aiohappyeyeballs==2.4.6 # via aiohttp -aiohttp==3.10.10 +aiohttp==3.11.13 # via aiobotocore aioitertools==0.12.0 # via aiobotocore -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp alabaster==0.7.16 # via sphinx @@ -16,7 +16,7 @@ altair==4.2.2 # via great-expectations annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # httpx # jupyter-server @@ -25,7 +25,9 @@ anyio==4.6.2.post1 appnope==0.1.4 # via ipykernel argon2-cffi==23.1.0 - # via jupyter-server + # via + # jupyter-server + # minio argon2-cffi-bindings==21.2.0 # via argon2-cffi arrow==1.3.0 @@ -34,48 +36,52 @@ asn1crypto==1.5.1 # via snowflake-connector-python assertpy==1.1 # via feast (setup.py) -asttokens==2.4.1 +asttokens==3.0.0 # via stack-data async-lru==2.0.4 # via jupyterlab async-property==0.2.2 # via python-keycloak -async-timeout==4.0.3 +async-timeout==5.0.1 # via # aiohttp # redis atpublic==4.1.0 # via ibis-framework -attrs==24.2.0 +attrs==25.1.0 # via # aiohttp + # jsonlines # jsonschema # referencing -azure-core==1.31.0 +azure-core==1.32.0 # via # azure-identity # azure-storage-blob -azure-identity==1.19.0 +azure-identity==1.20.0 # via feast (setup.py) -azure-storage-blob==12.23.1 +azure-storage-blob==12.24.1 # via feast (setup.py) -babel==2.16.0 +babel==2.17.0 # via # jupyterlab-server # sphinx -beautifulsoup4==4.12.3 - # via nbconvert +beautifulsoup4==4.13.3 + # via + # docling + # nbconvert bidict==0.23.1 # via ibis-framework -bigtree==0.21.3 +bigtree==0.25.0 # via feast (setup.py) -bleach==6.1.0 +bleach[css]==6.2.0 # via nbconvert -boto3==1.35.36 
+boto3==1.36.23 # via # feast (setup.py) + # ikvpy # moto -botocore==1.35.36 +botocore==1.36.23 # via # aiobotocore # boto3 @@ -86,12 +92,13 @@ build==1.2.2.post1 # feast (setup.py) # pip-tools # singlestoredb -cachetools==5.5.0 +cachetools==5.5.2 # via google-auth cassandra-driver==3.29.2 # via feast (setup.py) -certifi==2024.8.30 +certifi==2025.1.31 # via + # docling # elastic-transport # httpcore # httpx @@ -101,24 +108,27 @@ certifi==2024.8.30 # snowflake-connector-python cffi==1.17.1 # via + # feast (setup.py) # argon2-cffi-bindings # cryptography + # ikvpy # snowflake-connector-python cfgv==3.4.0 # via pre-commit -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via # requests # snowflake-connector-python -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # geomet # great-expectations # pip-tools + # typer # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via @@ -130,9 +140,11 @@ comm==0.2.2 # ipywidgets couchbase==4.3.2 # via feast (setup.py) -coverage[toml]==7.6.4 +couchbase-columnar==1.0.0 + # via feast (setup.py) +coverage[toml]==7.6.12 # via pytest-cov -cryptography==42.0.8 +cryptography==43.0.3 # via # feast (setup.py) # azure-identity @@ -146,7 +158,7 @@ cryptography==42.0.8 # snowflake-connector-python # types-pyopenssl # types-redis -cython==3.0.11 +cython==3.0.12 # via thriftpy2 dask[dataframe]==2024.8.0 # via @@ -154,34 +166,51 @@ dask[dataframe]==2024.8.0 # dask-expr dask-expr==1.1.10 # via dask -db-dtypes==1.3.0 +db-dtypes==1.4.1 # via google-cloud-bigquery -debugpy==1.8.7 +debugpy==1.8.12 # via ipykernel -decorator==5.1.1 +decorator==5.2.1 # via ipython defusedxml==0.7.1 # via nbconvert -deltalake==0.20.2 +deltalake==0.25.1 # via feast (setup.py) deprecation==2.1.0 # via python-keycloak dill==0.3.9 - # via feast (setup.py) + # via + # feast (setup.py) + # multiprocess distlib==0.3.9 # via virtualenv docker==7.1.0 # via testcontainers +docling==2.24.0 + # via feast (setup.py) 
+docling-core[chunking]==2.20.0 + # via + # docling + # docling-ibm-models + # docling-parse +docling-ibm-models==3.4.0 + # via docling +docling-parse==3.4.0 + # via docling docutils==0.19 # via sphinx duckdb==0.10.3 # via ibis-framework -elastic-transport==8.15.1 +easyocr==1.7.2 + # via docling +elastic-transport==8.17.0 # via elasticsearch -elasticsearch==8.15.1 +elasticsearch==8.17.1 # via feast (setup.py) entrypoints==0.4 # via altair +et-xmlfile==2.0.0 + # via openpyxl exceptiongroup==1.2.2 # via # anyio @@ -189,18 +218,23 @@ exceptiongroup==1.2.2 # pytest execnet==2.1.1 # via pytest-xdist -executing==2.1.0 +executing==2.2.0 # via stack-data -faiss-cpu==1.9.0 +faiss-cpu==1.10.0 # via feast (setup.py) -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fastjsonschema==2.20.0 +fastjsonschema==2.21.1 # via nbformat -filelock==3.16.1 +filelock==3.17.0 # via + # huggingface-hub # snowflake-connector-python + # torch + # transformers # virtualenv +filetype==1.2.0 + # via docling fqdn==1.5.1 # via jsonschema frozenlist==1.5.0 @@ -211,9 +245,11 @@ fsspec==2024.9.0 # via # feast (setup.py) # dask + # huggingface-hub + # torch geomet==0.2.1.post1 # via cassandra-driver -google-api-core[grpc]==2.22.0 +google-api-core[grpc]==2.24.1 # via # feast (setup.py) # google-cloud-bigquery @@ -222,7 +258,7 @@ google-api-core[grpc]==2.22.0 # google-cloud-core # google-cloud-datastore # google-cloud-storage -google-auth==2.35.0 +google-auth==2.38.0 # via # google-api-core # google-cloud-bigquery @@ -232,21 +268,21 @@ google-auth==2.35.0 # google-cloud-datastore # google-cloud-storage # kubernetes -google-cloud-bigquery[pandas]==3.26.0 +google-cloud-bigquery[pandas]==3.29.0 # via feast (setup.py) -google-cloud-bigquery-storage==2.27.0 +google-cloud-bigquery-storage==2.28.0 # via feast (setup.py) -google-cloud-bigtable==2.26.0 +google-cloud-bigtable==2.28.1 # via feast (setup.py) -google-cloud-core==2.4.1 +google-cloud-core==2.4.2 # via # google-cloud-bigquery # 
google-cloud-bigtable # google-cloud-datastore # google-cloud-storage -google-cloud-datastore==2.20.1 +google-cloud-datastore==2.20.2 # via feast (setup.py) -google-cloud-storage==2.18.2 +google-cloud-storage==2.19.0 # via feast (setup.py) google-crc32c==1.6.0 # via @@ -256,7 +292,7 @@ google-resumable-media==2.7.2 # via # google-cloud-bigquery # google-cloud-storage -googleapis-common-protos[grpc]==1.65.0 +googleapis-common-protos[grpc]==1.68.0 # via # feast (setup.py) # google-api-core @@ -264,9 +300,11 @@ googleapis-common-protos[grpc]==1.65.0 # grpcio-status great-expectations==0.18.22 # via feast (setup.py) -grpc-google-iam-v1==0.13.1 +greenlet==3.1.1 + # via sqlalchemy +grpc-google-iam-v1==0.14.0 # via google-cloud-bigtable -grpcio==1.67.0 +grpcio==1.67.1 # via # feast (setup.py) # google-api-core @@ -277,16 +315,20 @@ grpcio==1.67.0 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy + # pymilvus # qdrant-client -grpcio-health-checking==1.62.3 +grpcio-health-checking==1.67.1 # via feast (setup.py) -grpcio-reflection==1.62.3 +grpcio-reflection==1.67.1 # via feast (setup.py) -grpcio-status==1.62.3 - # via google-api-core -grpcio-testing==1.62.3 +grpcio-status==1.67.1 + # via + # google-api-core + # ikvpy +grpcio-testing==1.67.1 # via feast (setup.py) -grpcio-tools==1.62.3 +grpcio-tools==1.67.1 # via # feast (setup.py) # qdrant-client @@ -298,7 +340,7 @@ h11==0.14.0 # via # httpcore # uvicorn -h2==4.1.0 +h2==4.2.0 # via httpx happybase==1.2.0 # via feast (setup.py) @@ -306,9 +348,9 @@ hazelcast-python-client==5.5.0 # via feast (setup.py) hiredis==2.4.0 # via feast (setup.py) -hpack==4.0.0 +hpack==4.1.0 # via h2 -httpcore==1.0.6 +httpcore==1.0.7 # via httpx httptools==0.6.4 # via uvicorn @@ -318,15 +360,21 @@ httpx[http2]==0.27.2 # jupyterlab # python-keycloak # qdrant-client -hyperframe==6.0.1 +huggingface-hub==0.29.1 + # via + # docling + # docling-ibm-models + # tokenizers + # transformers +hyperframe==6.1.0 # via h2 -ibis-framework[duckdb]==9.0.0 
+ibis-framework[duckdb, mssql]==9.0.0 # via # feast (setup.py) # ibis-substrait ibis-substrait==4.0.1 # via feast (setup.py) -identify==2.6.1 +identify==2.6.8 # via pre-commit idna==3.10 # via @@ -336,9 +384,13 @@ idna==3.10 # requests # snowflake-connector-python # yarl +ikvpy==0.0.36 + # via feast (setup.py) +imageio==2.37.0 + # via scikit-image imagesize==1.4.1 # via sphinx -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via # build # dask @@ -364,9 +416,9 @@ isodate==0.7.2 # via azure-storage-blob isoduration==20.11.0 # via jsonschema -jedi==0.19.1 +jedi==0.19.2 # via ipython -jinja2==3.1.4 +jinja2==3.1.5 # via # feast (setup.py) # altair @@ -377,22 +429,29 @@ jinja2==3.1.4 # moto # nbconvert # sphinx + # torch jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -json5==0.9.25 +json5==0.10.0 # via jupyterlab-server +jsonlines==3.1.0 + # via docling-ibm-models jsonpatch==1.33 # via great-expectations jsonpointer==3.0.0 # via # jsonpatch # jsonschema +jsonref==1.1.0 + # via docling-core jsonschema[format-nongpl]==4.23.0 # via # feast (setup.py) # altair + # docling-core # great-expectations # jupyter-events # jupyterlab-server @@ -413,11 +472,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat -jupyter-events==0.10.0 +jupyter-events==0.12.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.2 +jupyter-server==2.15.0 # via # jupyter-lsp # jupyterlab @@ -426,7 +485,7 @@ jupyter-server==2.14.2 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.5 +jupyterlab==4.3.5 # via notebook jupyterlab-pygments==0.3.0 # via nbconvert @@ -440,18 +499,31 @@ jwcrypto==1.5.6 # via python-keycloak kubernetes==20.13.0 # via feast (setup.py) +latex2mathml==3.77.0 + # via docling-core +lazy-loader==0.4 + # via scikit-image locket==1.0.0 # via partd +lxml==5.3.1 + # via + # docling + # python-docx + # python-pptx +lz4==4.4.3 + # via trino makefun==1.15.6 # via great-expectations markdown-it-py==3.0.0 # 
via rich +marko==2.1.2 + # via docling markupsafe==3.0.2 # via # jinja2 # nbconvert # werkzeug -marshmallow==3.23.0 +marshmallow==3.26.1 # via great-expectations matplotlib-inline==0.1.7 # via @@ -459,19 +531,25 @@ matplotlib-inline==0.1.7 # ipython mdurl==0.1.2 # via markdown-it-py -minio==7.1.0 +milvus-lite==2.4.11 + # via pymilvus +minio==7.2.11 # via feast (setup.py) -mistune==3.0.2 +mistune==3.1.2 # via # great-expectations # nbconvert -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) mock==2.0.0 # via feast (setup.py) moto==4.2.14 # via feast (setup.py) -msal==1.31.0 +mpire[dill]==2.10.2 + # via semchunk +mpmath==1.3.0 + # via sympy +msal==1.31.1 # via # azure-identity # msal-extensions @@ -479,8 +557,11 @@ msal-extensions==1.2.0 # via azure-identity multidict==6.1.0 # via + # aiobotocore # aiohttp # yarl +multiprocess==0.70.17 + # via mpire mypy==1.11.2 # via # feast (setup.py) @@ -489,9 +570,9 @@ mypy-extensions==1.0.0 # via mypy mypy-protobuf==3.3.0 # via feast (setup.py) -nbclient==0.10.0 +nbclient==0.10.2 # via nbconvert -nbconvert==7.16.4 +nbconvert==7.16.6 # via jupyter-server nbformat==5.10.4 # via @@ -501,9 +582,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel +networkx==3.2.1 + # via + # scikit-image + # torch +ninja==1.11.1.3 + # via easyocr nodeenv==1.9.1 # via pre-commit -notebook==7.2.2 +notebook==7.3.2 # via great-expectations notebook-shim==0.2.4 # via @@ -515,18 +602,34 @@ numpy==1.26.4 # altair # dask # db-dtypes + # docling-ibm-models + # easyocr # faiss-cpu # great-expectations # ibis-framework + # imageio + # opencv-python-headless # pandas # pyarrow # qdrant-client + # safetensors + # scikit-image # scipy + # shapely + # tifffile + # torchvision + # transformers oauthlib==3.2.2 # via requests-oauthlib +opencv-python-headless==4.11.0.86 + # via + # docling-ibm-models + # easyocr +openpyxl==3.1.5 + # via docling overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -536,16 
+639,21 @@ packaging==24.1 # google-cloud-bigquery # great-expectations # gunicorn + # huggingface-hub # ibis-substrait # ipykernel + # jupyter-events # jupyter-server # jupyterlab # jupyterlab-server + # lazy-loader # marshmallow # nbconvert # pytest + # scikit-image # snowflake-connector-python # sphinx + # transformers pandas==2.2.3 # via # feast (setup.py) @@ -553,9 +661,12 @@ pandas==2.2.3 # dask # dask-expr # db-dtypes + # docling + # docling-core # google-cloud-bigquery # great-expectations # ibis-framework + # pymilvus # snowflake-connector-python pandocfilters==1.5.1 # via nbconvert @@ -567,11 +678,22 @@ parsy==2.1 # via ibis-framework partd==1.4.2 # via dask -pbr==6.1.0 +pbr==6.1.1 # via mock pexpect==4.9.0 # via ipython -pip==24.3.1 +pillow==11.1.0 + # via + # docling + # docling-core + # docling-ibm-models + # docling-parse + # easyocr + # imageio + # python-pptx + # scikit-image + # torchvision +pip==25.0.1 # via pip-tools pip-tools==7.4.1 # via feast (setup.py) @@ -590,21 +712,23 @@ portalocker==2.10.1 # qdrant-client pre-commit==3.3.1 # via feast (setup.py) -prometheus-client==0.21.0 +prometheus-client==0.21.1 # via # feast (setup.py) # jupyter-server -prompt-toolkit==3.0.48 +prompt-toolkit==3.0.50 # via ipython -propcache==0.2.0 - # via yarl -proto-plus==1.25.0 +propcache==0.3.0 + # via + # aiohttp + # yarl +proto-plus==1.26.0 # via # google-api-core # google-cloud-bigquery-storage # google-cloud-bigtable # google-cloud-datastore -protobuf==4.25.5 +protobuf==5.29.3 # via # feast (setup.py) # google-api-core @@ -618,18 +742,20 @@ protobuf==4.25.5 # grpcio-status # grpcio-testing # grpcio-tools + # ikvpy # mypy-protobuf # proto-plus + # pymilvus # substrait psutil==5.9.0 # via # feast (setup.py) # ipykernel -psycopg[binary, pool]==3.1.18 +psycopg[binary, pool]==3.2.5 # via feast (setup.py) -psycopg-binary==3.1.18 +psycopg-binary==3.2.5 # via psycopg -psycopg-pool==3.2.3 +psycopg-pool==3.2.5 # via psycopg ptyprocess==0.7.0 # via @@ -662,44 +788,62 @@ 
pyasn1-modules==0.4.1 # via google-auth pybindgen==0.22.1 # via feast (setup.py) +pyclipper==1.3.0.post6 + # via easyocr pycparser==2.22 # via cffi -pydantic==2.9.2 +pycryptodome==3.21.0 + # via minio +pydantic==2.10.6 # via # feast (setup.py) + # docling + # docling-core + # docling-ibm-models + # docling-parse # fastapi # great-expectations + # pydantic-settings # qdrant-client -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pydantic-settings==2.8.0 + # via docling +pygments==2.19.1 # via # feast (setup.py) # ipython + # mpire # nbconvert # rich # sphinx -pyjwt[crypto]==2.9.0 +pyjwt[crypto]==2.10.1 # via # feast (setup.py) # msal # singlestoredb # snowflake-connector-python -pymssql==2.3.1 +pymilvus==2.5.4 + # via feast (setup.py) +pymssql==2.3.2 # via feast (setup.py) pymysql==1.1.1 # via feast (setup.py) pyodbc==5.2.0 - # via feast (setup.py) -pyopenssl==24.2.1 + # via + # feast (setup.py) + # ibis-framework +pyopenssl==24.3.0 # via snowflake-connector-python -pyparsing==3.2.0 +pyparsing==3.2.1 # via great-expectations +pypdfium2==4.30.1 + # via docling pyproject-hooks==1.2.0 # via # build # pip-tools -pyspark==3.5.3 +pyspark==3.5.4 # via feast (setup.py) pytest==7.4.4 # via @@ -717,7 +861,7 @@ pytest-asyncio==0.23.8 # via feast (setup.py) pytest-benchmark==3.4.1 # via feast (setup.py) -pytest-cov==5.0.0 +pytest-cov==6.0.0 # via feast (setup.py) pytest-env==1.1.3 # via feast (setup.py) @@ -731,8 +875,11 @@ pytest-timeout==1.4.2 # via feast (setup.py) pytest-xdist==3.6.1 # via feast (setup.py) +python-bidi==0.6.6 + # via easyocr python-dateutil==2.9.0.post0 # via + # aiobotocore # arrow # botocore # google-cloud-bigquery @@ -743,13 +890,20 @@ python-dateutil==2.9.0.post0 # moto # pandas # trino +python-docx==1.1.2 + # via docling python-dotenv==1.0.1 - # via uvicorn -python-json-logger==2.0.7 + # via + # pydantic-settings + # pymilvus + # uvicorn +python-json-logger==3.2.1 # via jupyter-events python-keycloak==4.2.2 # via feast 
(setup.py) -pytz==2024.2 +python-pptx==1.0.2 + # via docling +pytz==2025.1 # via # great-expectations # ibis-framework @@ -760,39 +914,46 @@ pyyaml==6.0.2 # via # feast (setup.py) # dask + # docling-core + # easyocr + # huggingface-hub # ibis-substrait # jupyter-events # kubernetes # pre-commit # responses + # transformers # uvicorn -pyzmq==26.2.0 +pyzmq==26.2.1 # via # ipykernel # jupyter-client # jupyter-server -qdrant-client==1.12.0 +qdrant-client==1.13.2 # via feast (setup.py) redis==4.6.0 # via feast (setup.py) -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications # jupyter-events -regex==2024.9.11 +regex==2024.11.6 # via # feast (setup.py) # parsimonious + # transformers requests==2.32.3 # via # feast (setup.py) # azure-core # docker + # docling # google-api-core # google-cloud-bigquery # google-cloud-storage # great-expectations + # huggingface-hub # jupyterlab-server # kubernetes # moto @@ -804,12 +965,13 @@ requests==2.32.3 # singlestoredb # snowflake-connector-python # sphinx + # transformers # trino requests-oauthlib==2.0.0 # via kubernetes requests-toolbelt==1.0.0 # via python-keycloak -responses==0.25.3 +responses==0.25.6 # via moto rfc3339-validator==0.1.4 # via @@ -819,40 +981,60 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.9.3 - # via ibis-framework -rpds-py==0.20.0 +rich==13.9.4 + # via + # ibis-framework + # typer +rpds-py==0.23.1 # via # jsonschema # referencing rsa==4.9 # via google-auth +rtree==1.3.0 + # via docling ruamel-yaml==0.17.40 # via great-expectations ruamel-yaml-clib==0.2.12 # via ruamel-yaml -ruff==0.7.1 +ruff==0.9.7 # via feast (setup.py) -s3transfer==0.10.3 +s3transfer==0.11.2 # via boto3 +safetensors[torch]==0.5.2 + # via + # docling-ibm-models + # transformers +scikit-image==0.24.0 + # via easyocr scipy==1.13.1 - # via great-expectations + # via + # docling + # easyocr + # great-expectations + # scikit-image +semchunk==2.2.2 + # via docling-core send2trash==1.8.3 # 
via jupyter-server -setuptools==75.2.0 +setuptools==75.8.0 # via # grpcio-tools # jupyterlab # kubernetes + # pbr # pip-tools + # pymilvus # singlestoredb +shapely==2.0.7 + # via easyocr +shellingham==1.5.4 + # via typer singlestoredb==1.7.2 # via feast (setup.py) -six==1.16.0 +six==1.17.0 # via - # asttokens # azure-core - # bleach # geomet # happybase # kubernetes @@ -866,7 +1048,7 @@ sniffio==1.3.1 # httpx snowballstemmer==2.2.0 # via sphinx -snowflake-connector-python[pandas]==3.12.3 +snowflake-connector-python[pandas]==3.13.2 # via feast (setup.py) sortedcontainers==2.4.0 # via snowflake-connector-python @@ -886,37 +1068,46 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) sqlglot==23.12.2 # via ibis-framework -sqlite-vec==0.1.1 +sqlite-vec==0.1.6 # via feast (setup.py) -sqlparams==6.1.0 +sqlparams==6.2.0 # via singlestoredb stack-data==0.6.3 # via ipython -starlette==0.41.2 +starlette==0.45.3 # via fastapi substrait==0.23.0 # via ibis-substrait +sympy==1.13.3 + # via torch tabulate==0.9.0 - # via feast (setup.py) + # via + # feast (setup.py) + # docling-core + # docling-parse tenacity==8.5.0 # via feast (setup.py) terminado==0.18.1 # via # jupyter-server # jupyter-server-terminals -testcontainers==4.4.0 +testcontainers==4.8.2 # via feast (setup.py) thriftpy2==0.5.2 # via happybase +tifffile==2024.8.30 + # via scikit-image tinycss2==1.4.0 - # via nbconvert + # via bleach +tokenizers==0.19.1 + # via transformers toml==0.10.2 # via feast (setup.py) -tomli==2.0.2 +tomli==2.2.1 # via # build # coverage @@ -934,7 +1125,19 @@ toolz==0.12.1 # dask # ibis-framework # partd -tornado==6.4.1 +torch==2.2.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr + # safetensors + # torchvision +torchvision==0.17.2 + # via + # feast (setup.py) + # docling-ibm-models + # easyocr +tornado==6.4.2 # via # ipykernel # jupyter-client @@ -942,10 +1145,17 
@@ tornado==6.4.1 # jupyterlab # notebook # terminado -tqdm==4.66.6 +tqdm==4.67.1 # via # feast (setup.py) + # docling + # docling-ibm-models # great-expectations + # huggingface-hub + # milvus-lite + # mpire + # semchunk + # transformers traitlets==5.14.3 # via # comm @@ -961,37 +1171,45 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat -trino==0.330.0 +transformers==4.42.4 + # via + # docling-core + # docling-ibm-models +trino==0.333.0 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) -types-cffi==1.16.0.20240331 +typer==0.12.5 + # via + # docling + # docling-core +types-cffi==1.16.0.20241221 # via types-pyopenssl types-protobuf==3.19.22 # via # feast (setup.py) # mypy-protobuf -types-pymysql==1.1.0.20240524 +types-pymysql==1.1.0.20241103 # via feast (setup.py) types-pyopenssl==24.1.0.20240722 # via types-redis -types-python-dateutil==2.9.0.20241003 +types-python-dateutil==2.9.0.20241206 # via # feast (setup.py) # arrow -types-pytz==2024.2.0.20241003 +types-pytz==2025.1.0.20250204 # via feast (setup.py) -types-pyyaml==6.0.12.20240917 +types-pyyaml==6.0.12.20241230 # via feast (setup.py) types-redis==4.6.0.20241004 # via feast (setup.py) types-requests==2.30.0.0 # via feast (setup.py) -types-setuptools==75.2.0.20241025 +types-setuptools==75.8.0.20250225 # via # feast (setup.py) # types-cffi -types-tabulate==0.9.0.20240106 +types-tabulate==0.9.0.20241207 # via feast (setup.py) types-urllib3==1.26.25.14 # via types-requests @@ -1003,35 +1221,49 @@ typing-extensions==4.12.2 # azure-core # azure-identity # azure-storage-blob + # beautifulsoup4 + # docling-core # fastapi # great-expectations + # huggingface-hub # ibis-framework # ipython # jwcrypto + # minio + # mistune # multidict # mypy # psycopg # psycopg-pool # pydantic # pydantic-core + # python-docx + # python-json-logger + # python-pptx + # referencing # rich # snowflake-connector-python # sqlalchemy # starlette # testcontainers + # torch # typeguard + # typer # uvicorn 
-tzdata==2024.2 +tzdata==2025.1 # via pandas -tzlocal==5.2 +tzlocal==5.3 # via # great-expectations # trino +ujson==5.10.0 + # via pymilvus uri-template==1.3.0 # via jsonschema urllib3==1.26.20 # via # feast (setup.py) + # aiobotocore # botocore # docker # elastic-transport @@ -1043,11 +1275,11 @@ urllib3==1.26.20 # responses # snowflake-connector-python # testcontainers -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn @@ -1055,11 +1287,11 @@ virtualenv==20.23.0 # via # feast (setup.py) # pre-commit -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.8.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1069,23 +1301,27 @@ websocket-client==1.8.0 # via # jupyter-server # kubernetes -websockets==13.1 +websockets==15.0 # via uvicorn -werkzeug==3.0.6 +werkzeug==3.1.3 # via moto -wheel==0.44.0 +wheel==0.45.1 # via # pip-tools # singlestoredb widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.2 # via # aiobotocore # testcontainers +xlsxwriter==3.2.2 + # via python-pptx xmltodict==0.14.2 # via moto -yarl==1.16.0 +yarl==1.18.3 # via aiohttp -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata +zstandard==0.23.0 + # via trino diff --git a/sdk/python/requirements/py3.9-requirements.txt b/sdk/python/requirements/py3.9-requirements.txt index ec46a195c12..e7aa5a42409 100644 --- a/sdk/python/requirements/py3.9-requirements.txt +++ b/sdk/python/requirements/py3.9-requirements.txt @@ -2,26 +2,26 @@ # uv pip compile -p 3.9 --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.9-requirements.txt annotated-types==0.7.0 # via pydantic -anyio==4.6.2.post1 +anyio==4.8.0 # via # starlette # watchfiles -attrs==24.2.0 +attrs==25.1.0 # via # jsonschema # referencing -bigtree==0.21.3 +bigtree==0.25.0 # via feast (setup.py) 
-certifi==2024.8.30 +certifi==2025.1.31 # via requests -charset-normalizer==3.4.0 +charset-normalizer==3.4.1 # via requests -click==8.1.7 +click==8.1.8 # via # feast (setup.py) # dask # uvicorn -cloudpickle==3.1.0 +cloudpickle==3.1.1 # via dask colorama==0.4.6 # via feast (setup.py) @@ -35,10 +35,12 @@ dill==0.3.9 # via feast (setup.py) exceptiongroup==1.2.2 # via anyio -fastapi==0.115.4 +fastapi==0.115.8 # via feast (setup.py) -fsspec==2024.10.0 +fsspec==2025.2.0 # via dask +greenlet==3.1.1 + # via sqlalchemy gunicorn==23.0.0 # via # feast (setup.py) @@ -51,11 +53,11 @@ idna==3.10 # via # anyio # requests -importlib-metadata==8.5.0 +importlib-metadata==8.6.1 # via # dask # typeguard -jinja2==3.1.4 +jinja2==3.1.5 # via feast (setup.py) jsonschema==4.23.0 # via feast (setup.py) @@ -65,9 +67,9 @@ locket==1.0.0 # via partd markupsafe==3.0.2 # via jinja2 -mmh3==5.0.1 +mmh3==5.1.0 # via feast (setup.py) -mypy==1.13.0 +mypy==1.15.0 # via sqlalchemy mypy-extensions==1.0.0 # via mypy @@ -76,7 +78,7 @@ numpy==1.26.4 # feast (setup.py) # dask # pandas -packaging==24.1 +packaging==24.2 # via # dask # gunicorn @@ -87,54 +89,54 @@ pandas==2.2.3 # dask-expr partd==1.4.2 # via dask -prometheus-client==0.21.0 +prometheus-client==0.21.1 # via feast (setup.py) -protobuf==4.25.5 +protobuf==5.29.3 # via feast (setup.py) -psutil==6.1.0 +psutil==7.0.0 # via feast (setup.py) pyarrow==18.0.0 # via # feast (setup.py) # dask-expr -pydantic==2.9.2 +pydantic==2.10.6 # via # feast (setup.py) # fastapi -pydantic-core==2.23.4 +pydantic-core==2.27.2 # via pydantic -pygments==2.18.0 +pygments==2.19.1 # via feast (setup.py) -pyjwt==2.9.0 +pyjwt==2.10.1 # via feast (setup.py) python-dateutil==2.9.0.post0 # via pandas python-dotenv==1.0.1 # via uvicorn -pytz==2024.2 +pytz==2025.1 # via pandas pyyaml==6.0.2 # via # feast (setup.py) # dask # uvicorn -referencing==0.35.1 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications requests==2.32.3 # via feast (setup.py) -rpds-py==0.20.0 
+rpds-py==0.23.1 # via # jsonschema # referencing -six==1.16.0 +six==1.17.0 # via python-dateutil sniffio==1.3.1 # via anyio -sqlalchemy[mypy]==2.0.36 +sqlalchemy[mypy]==2.0.38 # via feast (setup.py) -starlette==0.41.2 +starlette==0.45.3 # via fastapi tabulate==0.9.0 # via feast (setup.py) @@ -142,15 +144,15 @@ tenacity==8.5.0 # via feast (setup.py) toml==0.10.2 # via feast (setup.py) -tomli==2.0.2 +tomli==2.2.1 # via mypy toolz==1.0.0 # via # dask # partd -tqdm==4.66.6 +tqdm==4.67.1 # via feast (setup.py) -typeguard==4.4.0 +typeguard==4.4.2 # via feast (setup.py) typing-extensions==4.12.2 # via @@ -159,25 +161,26 @@ typing-extensions==4.12.2 # mypy # pydantic # pydantic-core + # referencing # sqlalchemy # starlette # typeguard # uvicorn -tzdata==2024.2 +tzdata==2025.1 # via pandas -urllib3==2.2.3 +urllib3==2.3.0 # via requests -uvicorn[standard]==0.32.0 +uvicorn[standard]==0.34.0 # via # feast (setup.py) # uvicorn-worker -uvicorn-worker==0.2.0 +uvicorn-worker==0.3.0 # via feast (setup.py) uvloop==0.21.0 # via uvicorn -watchfiles==0.24.0 +watchfiles==1.0.4 # via uvicorn -websockets==13.1 +websockets==15.0 # via uvicorn -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata diff --git a/sdk/python/tests/conftest.py b/sdk/python/tests/conftest.py index 24c8f40f742..c46aff681a3 100644 --- a/sdk/python/tests/conftest.py +++ b/sdk/python/tests/conftest.py @@ -57,8 +57,12 @@ location, ) from tests.utils.auth_permissions_util import default_store -from tests.utils.generate_self_signed_certifcate_util import generate_self_signed_cert from tests.utils.http_server import check_port_open, free_port # noqa: E402 +from tests.utils.ssl_certifcates_util import ( + combine_trust_stores, + create_ca_trust_store, + generate_self_signed_cert, +) logger = logging.getLogger(__name__) @@ -81,7 +85,7 @@ def pytest_configure(config): if platform in ["darwin", "windows"]: - multiprocessing.set_start_method("spawn") + multiprocessing.set_start_method("spawn", force=True) else: 
multiprocessing.set_start_method("fork") config.addinivalue_line( @@ -306,6 +310,10 @@ def pytest_generate_tests(metafunc: pytest.Metafunc): pytest.mark.xdist_group(name=m) for m in c.offline_store_creator.xdist_groups() ] + # Check if there are any test markers associated with the creator and add them. + if c.offline_store_creator.test_markers(): + marks.extend(c.offline_store_creator.test_markers()) + _config_cache[c] = pytest.param(c, marks=marks) configs.append(_config_cache[c]) @@ -514,17 +522,36 @@ def auth_config(request, is_integration_test): return auth_configuration -@pytest.fixture(params=[True, False], scope="module") +@pytest.fixture(scope="module") def tls_mode(request): - is_tls_mode = request.param + is_tls_mode = request.param[0] + output_combined_truststore_path = "" if is_tls_mode: certificates_path = tempfile.mkdtemp() tls_key_path = os.path.join(certificates_path, "key.pem") tls_cert_path = os.path.join(certificates_path, "cert.pem") + generate_self_signed_cert(cert_path=tls_cert_path, key_path=tls_key_path) + is_ca_trust_store_set = request.param[1] + if is_ca_trust_store_set: + # Paths + feast_ca_trust_store_path = os.path.join( + certificates_path, "feast_trust_store.pem" + ) + create_ca_trust_store( + public_key_path=tls_cert_path, + private_key_path=tls_key_path, + output_trust_store_path=feast_ca_trust_store_path, + ) + + # Combine trust stores + output_combined_path = os.path.join( + certificates_path, "combined_trust_store.pem" + ) + combine_trust_stores(feast_ca_trust_store_path, output_combined_path) else: tls_key_path = "" tls_cert_path = "" - return is_tls_mode, tls_key_path, tls_cert_path + return is_tls_mode, tls_key_path, tls_cert_path, output_combined_truststore_path diff --git a/sdk/python/tests/data/data_creator.py b/sdk/python/tests/data/data_creator.py index 5d6cffeb9df..dfe94913e97 100644 --- a/sdk/python/tests/data/data_creator.py +++ b/sdk/python/tests/data/data_creator.py @@ -1,8 +1,8 @@ from datetime import datetime, 
timedelta, timezone from typing import Dict, List, Optional +from zoneinfo import ZoneInfo import pandas as pd -from zoneinfo import ZoneInfo from feast.types import FeastType, Float32, Int32, Int64, String from feast.utils import _utc_now @@ -84,6 +84,8 @@ def get_feature_values_for_dtype( def create_document_dataset() -> pd.DataFrame: data = { "item_id": [1, 2, 3], + "string_feature": ["a", "b", "c"], + "float_feature": [1.0, 2.0, 3.0], "embedding_float": [[4.0, 5.0], [1.0, 2.0], [3.0, 4.0]], "embedding_double": [[4.0, 5.0], [1.0, 2.0], [3.0, 4.0]], "ts": [ diff --git a/sdk/python/tests/doctest/test_all.py b/sdk/python/tests/doctest/test_all.py index d1b2161252f..de032264e6d 100644 --- a/sdk/python/tests/doctest/test_all.py +++ b/sdk/python/tests/doctest/test_all.py @@ -77,9 +77,11 @@ def test_docstrings(): full_name = package.__name__ + "." + name try: - temp_module = importlib.import_module(full_name) - if is_pkg: - next_packages.append(temp_module) + # https://github.com/feast-dev/feast/issues/5088 + if "ikv" not in full_name and "milvus" not in full_name: + temp_module = importlib.import_module(full_name) + if is_pkg: + next_packages.append(temp_module) except ModuleNotFoundError: pass diff --git a/sdk/python/tests/example_repos/example_feature_repo_1.py b/sdk/python/tests/example_repos/example_feature_repo_1.py index daf7b7e7e6f..1671bd0ae3a 100644 --- a/sdk/python/tests/example_repos/example_feature_repo_1.py +++ b/sdk/python/tests/example_repos/example_feature_repo_1.py @@ -118,8 +118,15 @@ name="document_embeddings", entities=[item], schema=[ - Field(name="Embeddings", dtype=Array(Float32)), + Field( + name="Embeddings", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="L2", + ), Field(name="item_id", dtype=String), + Field(name="content", dtype=String), + Field(name="title", dtype=String), ], source=rag_documents_source, ttl=timedelta(hours=24), diff --git a/sdk/python/tests/example_repos/example_feature_repo_with_bfvs.py 
b/sdk/python/tests/example_repos/example_feature_repo_with_bfvs.py index e0f75c0c6ff..9ee05b47fe4 100644 --- a/sdk/python/tests/example_repos/example_feature_repo_with_bfvs.py +++ b/sdk/python/tests/example_repos/example_feature_repo_with_bfvs.py @@ -18,6 +18,8 @@ driver_hourly_stats_view = BatchFeatureView( name="driver_hourly_stats", entities=[driver], + mode="python", + udf=lambda x: x, ttl=timedelta(days=1), schema=[ Field(name="conv_rate", dtype=Float32), @@ -41,6 +43,8 @@ global_stats_feature_view = BatchFeatureView( name="global_daily_stats", entities=None, + mode="python", + udf=lambda x: x, ttl=timedelta(days=1), schema=[ Field(name="num_rides", dtype=Int32), diff --git a/sdk/python/tests/example_repos/example_rag_feature_repo.py b/sdk/python/tests/example_repos/example_rag_feature_repo.py new file mode 100644 index 00000000000..d87a2a34df1 --- /dev/null +++ b/sdk/python/tests/example_repos/example_rag_feature_repo.py @@ -0,0 +1,47 @@ +from datetime import timedelta + +from feast import Entity, FeatureView, Field, FileSource +from feast.types import Array, Float32, Int64, String, UnixTimestamp, ValueType + +# This is for Milvus +# Note that file source paths are not validated, so there doesn't actually need to be any data +# at the paths for these file sources. Since these paths are effectively fake, this example +# feature repo should not be used for historical retrieval. + +rag_documents_source = FileSource( + path="data/embedded_documents.parquet", + timestamp_field="event_timestamp", + created_timestamp_column="created_timestamp", +) + +item = Entity( + name="item_id", # The name is derived from this argument, not object name. 
+ join_keys=["item_id"], + value_type=ValueType.INT64, +) + +author = Entity( + name="author_id", + join_keys=["author_id"], + value_type=ValueType.STRING, +) + +document_embeddings = FeatureView( + name="embedded_documents", + entities=[item, author], + schema=[ + Field( + name="vector", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="COSINE", + ), + Field(name="item_id", dtype=Int64), + Field(name="author_id", dtype=String), + Field(name="created_timestamp", dtype=UnixTimestamp), + Field(name="sentence_chunks", dtype=String), + Field(name="event_timestamp", dtype=UnixTimestamp), + ], + source=rag_documents_source, + ttl=timedelta(hours=24), +) diff --git a/sdk/python/tests/foo_provider.py b/sdk/python/tests/foo_provider.py index 570a6d4f8d5..2aa674c0aa5 100644 --- a/sdk/python/tests/foo_provider.py +++ b/sdk/python/tests/foo_provider.py @@ -150,6 +150,7 @@ def retrieve_online_documents( config: RepoConfig, table: FeatureView, requested_feature: str, + requested_features: Optional[List[str]], query: List[float], top_k: int, distance_metric: Optional[str] = None, @@ -163,6 +164,24 @@ def retrieve_online_documents( ]: return [] + def retrieve_online_documents_v2( + self, + config: RepoConfig, + table: FeatureView, + requested_features: List[str], + query: Optional[List[float]], + top_k: int, + distance_metric: Optional[str] = None, + query_string: Optional[str] = None, + ) -> List[ + Tuple[ + Optional[datetime], + Optional[EntityKeyProto], + Optional[Dict[str, ValueProto]], + ] + ]: + return [] + def validate_data_source( self, config: RepoConfig, diff --git a/sdk/python/tests/integration/conftest.py b/sdk/python/tests/integration/conftest.py index 82f80b89927..21c9051d0d7 100644 --- a/sdk/python/tests/integration/conftest.py +++ b/sdk/python/tests/integration/conftest.py @@ -1,4 +1,7 @@ import logging +import random +import time +from multiprocessing import Manager import pytest from testcontainers.keycloak import KeycloakContainer @@ -9,14 
+12,30 @@ from tests.utils.auth_permissions_util import setup_permissions_on_keycloak logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +shared_state = Manager().dict() @pytest.fixture(scope="session") def start_keycloak_server(): + # Add random sleep between 0 and 2 before checking the state to avoid concurrency issues. + random_sleep_time = random.uniform(0, 2) + time.sleep(random_sleep_time) + + # If the Keycloak instance is already started (in any worker), reuse it + if shared_state.get("keycloak_started", False): + return shared_state["keycloak_url"] logger.info("Starting keycloak instance") with KeycloakContainer("quay.io/keycloak/keycloak:24.0.1") as keycloak_container: setup_permissions_on_keycloak(keycloak_container.get_client()) - yield keycloak_container.get_url() + shared_state["keycloak_started"] = True + shared_state["keycloak_url"] = keycloak_container.get_url() + yield shared_state["keycloak_url"] + + # After the fixture is done, cleanup the shared state + del shared_state["keycloak_started"] + del shared_state["keycloak_url"] @pytest.fixture(scope="session") diff --git a/sdk/python/tests/integration/feature_repos/repo_configuration.py b/sdk/python/tests/integration/feature_repos/repo_configuration.py index c688a848362..54129f23c6e 100644 --- a/sdk/python/tests/integration/feature_repos/repo_configuration.py +++ b/sdk/python/tests/integration/feature_repos/repo_configuration.py @@ -49,6 +49,7 @@ FileDataSourceCreator, RemoteOfflineOidcAuthStoreDataSourceCreator, RemoteOfflineStoreDataSourceCreator, + RemoteOfflineTlsStoreDataSourceCreator, ) from tests.integration.feature_repos.universal.data_sources.redshift import ( RedshiftDataSourceCreator, @@ -85,6 +86,7 @@ ) DYNAMO_CONFIG = {"type": "dynamodb", "region": "us-west-2"} +MILVUS_CONFIG = {"type": "milvus"} REDIS_CONFIG = {"type": "redis", "connection_string": "localhost:6379,db=0"} REDIS_CLUSTER_CONFIG = { "type": "redis", @@ -131,6 +133,7 @@ ("local", 
DuckDBDeltaDataSourceCreator), ("local", RemoteOfflineStoreDataSourceCreator), ("local", RemoteOfflineOidcAuthStoreDataSourceCreator), + ("local", RemoteOfflineTlsStoreDataSourceCreator), ] if os.getenv("FEAST_IS_LOCAL_TEST", "False") == "True": @@ -160,6 +163,7 @@ AVAILABLE_ONLINE_STORES["datastore"] = ("datastore", None) AVAILABLE_ONLINE_STORES["snowflake"] = (SNOWFLAKE_CONFIG, None) AVAILABLE_ONLINE_STORES["bigtable"] = (BIGTABLE_CONFIG, None) + # AVAILABLE_ONLINE_STORES["milvus"] = (MILVUS_CONFIG, None) # Uncomment to test using private IKV account. Currently not enabled as # there is no dedicated IKV instance for CI testing and there is no @@ -557,6 +561,9 @@ def construct_test_environment( cache_ttl_seconds=1, ) + if test_repo_config.online_store in ["milvus", "pgvector", "qdrant"]: + entity_key_serialization_version = 3 + environment_params = { "name": project, "provider": test_repo_config.provider, diff --git a/sdk/python/tests/integration/feature_repos/universal/data_source_creator.py b/sdk/python/tests/integration/feature_repos/universal/data_source_creator.py index 513a94ee210..467db4dddce 100644 --- a/sdk/python/tests/integration/feature_repos/universal/data_source_creator.py +++ b/sdk/python/tests/integration/feature_repos/universal/data_source_creator.py @@ -2,6 +2,7 @@ from typing import Dict, Optional import pandas as pd +from _pytest.mark import MarkDecorator from feast.data_source import DataSource from feast.feature_logging import LoggingDestination @@ -64,3 +65,11 @@ def teardown(self): @staticmethod def xdist_groups() -> list[str]: return [] + + @staticmethod + def test_markers() -> list[MarkDecorator]: + """ + return the array of test markers to add dynamically to the tests created by this creator method. override this method in your implementations. By default, it will not add any markers. 
+ :return: + """ + return [] diff --git a/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py b/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py index 35325c2737e..6f6e5d68133 100644 --- a/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py +++ b/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py @@ -11,7 +11,9 @@ import pandas as pd import pyarrow as pa import pyarrow.parquet as pq +import pytest import yaml +from _pytest.mark import MarkDecorator from minio import Minio from testcontainers.core.generic import DockerContainer from testcontainers.core.waiting_utils import wait_for_logs @@ -35,6 +37,7 @@ ) from tests.utils.auth_permissions_util import include_auth_config from tests.utils.http_server import check_port_open, free_port # noqa: E402 +from tests.utils.ssl_certifcates_util import generate_self_signed_cert logger = logging.getLogger(__name__) @@ -371,6 +374,10 @@ def __init__(self, project_name: str, *args, **kwargs): self.server_port: int = 0 self.proc: Optional[Popen[bytes]] = None + @staticmethod + def test_markers() -> list[MarkDecorator]: + return [pytest.mark.rbac_remote_integration_test] + def setup(self, registry: RegistryConfig): parent_offline_config = super().create_offline_store_config() config = RepoConfig( @@ -410,11 +417,74 @@ def setup(self, registry: RegistryConfig): ) return "grpc+tcp://{}:{}".format(host, self.server_port) + +class RemoteOfflineTlsStoreDataSourceCreator(FileDataSourceCreator): + def __init__(self, project_name: str, *args, **kwargs): + super().__init__(project_name) + self.server_port: int = 0 + self.proc: Optional[Popen[bytes]] = None + + @staticmethod + def test_markers() -> list[MarkDecorator]: + return [pytest.mark.rbac_remote_integration_test] + + def setup(self, registry: RegistryConfig): + parent_offline_config = super().create_offline_store_config() + config = RepoConfig( + project=self.project_name, + provider="local", + 
offline_store=parent_offline_config, + registry=registry.path, + entity_key_serialization_version=2, + ) + + certificates_path = tempfile.mkdtemp() + tls_key_path = os.path.join(certificates_path, "key.pem") + self.tls_cert_path = os.path.join(certificates_path, "cert.pem") + generate_self_signed_cert(cert_path=self.tls_cert_path, key_path=tls_key_path) + + repo_path = Path(tempfile.mkdtemp()) + with open(repo_path / "feature_store.yaml", "w") as outfile: + yaml.dump(config.model_dump(by_alias=True), outfile) + repo_path = repo_path.resolve() + + self.server_port = free_port() + host = "0.0.0.0" + cmd = [ + "feast", + "-c" + str(repo_path), + "serve_offline", + "--host", + host, + "--port", + str(self.server_port), + "--key", + str(tls_key_path), + "--cert", + str(self.tls_cert_path), + ] + self.proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + + _time_out_sec: int = 60 + # Wait for server to start + wait_retry_backoff( + lambda: (None, check_port_open(host, self.server_port)), + timeout_secs=_time_out_sec, + timeout_msg=f"Unable to start the feast remote offline server in {_time_out_sec} seconds at port={self.server_port}", + ) + return "grpc+tls://{}:{}".format(host, self.server_port) + def create_offline_store_config(self) -> FeastConfigBaseModel: - self.remote_offline_store_config = RemoteOfflineStoreConfig( - type="remote", host="0.0.0.0", port=self.server_port + remote_offline_store_config = RemoteOfflineStoreConfig( + type="remote", + host="0.0.0.0", + port=self.server_port, + scheme="https", + cert=self.tls_cert_path, ) - return self.remote_offline_store_config + return remote_offline_store_config def teardown(self): super().teardown() @@ -455,6 +525,10 @@ def __init__(self, project_name: str, *args, **kwargs): def xdist_groups() -> list[str]: return ["keycloak"] + @staticmethod + def test_markers() -> list[MarkDecorator]: + return [pytest.mark.rbac_remote_integration_test] + def setup(self, registry: RegistryConfig): 
parent_offline_config = super().create_offline_store_config() config = RepoConfig( @@ -499,10 +573,10 @@ def setup(self, registry: RegistryConfig): return "grpc+tcp://{}:{}".format(host, self.server_port) def create_offline_store_config(self) -> FeastConfigBaseModel: - self.remote_offline_store_config = RemoteOfflineStoreConfig( + remote_offline_store_config = RemoteOfflineStoreConfig( type="remote", host="0.0.0.0", port=self.server_port ) - return self.remote_offline_store_config + return remote_offline_store_config def get_keycloak_url(self): return self.keycloak_url diff --git a/sdk/python/tests/integration/feature_repos/universal/feature_views.py b/sdk/python/tests/integration/feature_repos/universal/feature_views.py index 11ddcb0ecc6..7fc5149d240 100644 --- a/sdk/python/tests/integration/feature_repos/universal/feature_views.py +++ b/sdk/python/tests/integration/feature_repos/universal/feature_views.py @@ -17,7 +17,7 @@ from feast.data_source import DataSource, RequestSource from feast.feature_view_projection import FeatureViewProjection from feast.on_demand_feature_view import PandasTransformation, SubstraitTransformation -from feast.types import Array, FeastType, Float32, Float64, Int32, Int64 +from feast.types import Array, FeastType, Float32, Float64, Int32, Int64, String from tests.integration.feature_repos.universal.entities import ( customer, driver, @@ -160,8 +160,20 @@ def create_item_embeddings_feature_view(source, infer_features: bool = False): schema=None if infer_features else [ - Field(name="embedding_double", dtype=Array(Float64)), - Field(name="embedding_float", dtype=Array(Float32)), + Field( + name="embedding_double", + dtype=Array(Float64), + vector_index=True, + vector_search_metric="L2", + ), + Field( + name="embedding_float", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="L2", + ), + Field(name="string_feature", dtype=String), + Field(name="float_feature", dtype=Float32), ], source=source, ttl=timedelta(hours=2), @@ 
-183,6 +195,7 @@ def create_item_embeddings_batch_feature_view( ], source=source, ttl=timedelta(hours=2), + udf=lambda x: x, ) return item_embeddings_feature_view @@ -225,6 +238,7 @@ def create_driver_hourly_stats_batch_feature_view( source=source, ttl=timedelta(hours=2), tags=TAGS, + udf=lambda x: x, ) return driver_stats_feature_view diff --git a/sdk/python/tests/integration/feature_repos/universal/online_store/couchbase.py b/sdk/python/tests/integration/feature_repos/universal/online_store/couchbase.py index f2ba12da8da..2723ff13a30 100644 --- a/sdk/python/tests/integration/feature_repos/universal/online_store/couchbase.py +++ b/sdk/python/tests/integration/feature_repos/universal/online_store/couchbase.py @@ -66,7 +66,7 @@ def create_online_store(self) -> Dict[str, object]: # Return the configuration for Feast return { - "type": "couchbase", + "type": "couchbase.online", "connection_string": "couchbase://127.0.0.1", "user": self.username, "password": self.password, diff --git a/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py b/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py new file mode 100644 index 00000000000..c02bd144016 --- /dev/null +++ b/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py @@ -0,0 +1,43 @@ +from typing import Any, Dict + +import docker +from testcontainers.core.container import DockerContainer +from testcontainers.core.waiting_utils import wait_for_logs + +from tests.integration.feature_repos.universal.online_store_creator import ( + OnlineStoreCreator, +) + + +class MilvusOnlineStoreCreator(OnlineStoreCreator): + def __init__(self, project_name: str, **kwargs): + super().__init__(project_name) + self.fixed_port = 19530 + self.container = DockerContainer("milvusdb/milvus:v2.4.4").with_exposed_ports( + self.fixed_port + ) + self.client = docker.from_env() + + def create_online_store(self) -> Dict[str, Any]: + self.container.start() + # Wait for Milvus server to 
be ready + # log_string_to_wait_for = "Ready to accept connections" + log_string_to_wait_for = "" + wait_for_logs( + container=self.container, predicate=log_string_to_wait_for, timeout=30 + ) + host = "localhost" + port = self.container.get_exposed_port(self.fixed_port) + return { + "type": "milvus", + "host": host, + "port": int(port), + "index_type": "IVF_FLAT", + "metric_type": "L2", + "embedding_dim": 2, + "vector_enabled": True, + "nlist": 1, + } + + def teardown(self): + self.container.stop() diff --git a/sdk/python/tests/integration/materialization/test_snowflake.py b/sdk/python/tests/integration/materialization/test_snowflake.py index 5f01641c3b5..a783eac0380 100644 --- a/sdk/python/tests/integration/materialization/test_snowflake.py +++ b/sdk/python/tests/integration/materialization/test_snowflake.py @@ -178,9 +178,9 @@ def test_snowflake_materialization_consistency_internal_with_lists( assert actual_value is not None, f"Response: {response_dict}" if feature_dtype == "float": for actual_num, expected_num in zip(actual_value, expected_value): - assert ( - abs(actual_num - expected_num) < 1e-6 - ), f"Response: {response_dict}, Expected: {expected_value}" + assert abs(actual_num - expected_num) < 1e-6, ( + f"Response: {response_dict}, Expected: {expected_value}" + ) else: assert actual_value == expected_value diff --git a/sdk/python/tests/integration/offline_store/test_validation.py b/sdk/python/tests/integration/offline_store/test_dqm_validation.py similarity index 100% rename from sdk/python/tests/integration/offline_store/test_validation.py rename to sdk/python/tests/integration/offline_store/test_dqm_validation.py diff --git a/sdk/python/tests/integration/offline_store/test_feature_logging.py b/sdk/python/tests/integration/offline_store/test_feature_logging.py index 32f506f90b2..53147d242ef 100644 --- a/sdk/python/tests/integration/offline_store/test_feature_logging.py +++ b/sdk/python/tests/integration/offline_store/test_feature_logging.py @@ -106,7 
+106,15 @@ def retrieve(): ) persisted_logs = persisted_logs[expected_columns] + logs_df = logs_df[expected_columns] + + # Convert timezone-aware datetime values to naive datetime values + logs_df[LOG_TIMESTAMP_FIELD] = logs_df[LOG_TIMESTAMP_FIELD].dt.tz_localize(None) + persisted_logs[LOG_TIMESTAMP_FIELD] = persisted_logs[ + LOG_TIMESTAMP_FIELD + ].dt.tz_localize(None) + pd.testing.assert_frame_equal( logs_df.sort_values(REQUEST_ID_FIELD).reset_index(drop=True), persisted_logs.sort_values(REQUEST_ID_FIELD).reset_index(drop=True), diff --git a/sdk/python/tests/integration/offline_store/test_universal_historical_retrieval.py b/sdk/python/tests/integration/offline_store/test_universal_historical_retrieval.py index 97ad54251fe..37df649386c 100644 --- a/sdk/python/tests/integration/offline_store/test_universal_historical_retrieval.py +++ b/sdk/python/tests/integration/offline_store/test_universal_historical_retrieval.py @@ -14,7 +14,7 @@ from feast.infra.offline_stores.offline_utils import ( DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL, ) -from feast.types import Float32, Int32 +from feast.types import Float32, Int32, String from feast.utils import _utc_now from tests.integration.feature_repos.repo_configuration import ( construct_universal_feature_views, @@ -23,6 +23,7 @@ from tests.integration.feature_repos.universal.data_sources.file import ( RemoteOfflineOidcAuthStoreDataSourceCreator, RemoteOfflineStoreDataSourceCreator, + RemoteOfflineTlsStoreDataSourceCreator, ) from tests.integration.feature_repos.universal.data_sources.snowflake import ( SnowflakeDataSourceCreator, @@ -166,6 +167,7 @@ def test_historical_features_main( environment.data_source_creator, ( RemoteOfflineStoreDataSourceCreator, + RemoteOfflineTlsStoreDataSourceCreator, RemoteOfflineOidcAuthStoreDataSourceCreator, ), ): @@ -637,3 +639,100 @@ def test_historical_features_containing_backfills(environment): actual_df, sort_by=["driver_id"], ) + + +@pytest.mark.integration 
+@pytest.mark.universal_offline_stores +@pytest.mark.parametrize("full_feature_names", [True, False], ids=lambda v: str(v)) +def test_historical_features_field_mapping( + environment, universal_data_sources, full_feature_names +): + store = environment.feature_store + + # (entities, datasets, data_sources) = universal_data_sources + # feature_views = construct_universal_feature_views(data_sources) + + now = datetime.now().replace(microsecond=0, second=0, minute=0) + tomorrow = now + timedelta(days=1) + day_after_tomorrow = now + timedelta(days=2) + + entity_df = pd.DataFrame( + data=[ + {"driver_id": 1001, "event_timestamp": day_after_tomorrow}, + {"driver_id": 1002, "event_timestamp": day_after_tomorrow}, + ] + ) + + driver_stats_df = pd.DataFrame( + data=[ + { + "id": 1001, + "avg_daily_trips": 20, + "event_timestamp": now, + "created": tomorrow, + }, + { + "id": 1002, + "avg_daily_trips": 40, + "event_timestamp": tomorrow, + "created": now, + }, + ] + ) + + expected_df = pd.DataFrame( + data=[ + { + "driver_id": 1001, + "event_timestamp": day_after_tomorrow, + "avg_daily_trips": 20, + }, + { + "driver_id": 1002, + "event_timestamp": day_after_tomorrow, + "avg_daily_trips": 40, + }, + ] + ) + + driver_stats_data_source = environment.data_source_creator.create_data_source( + df=driver_stats_df, + destination_name=f"test_driver_stats_{int(time.time_ns())}_{random.randint(1000, 9999)}", + timestamp_field="event_timestamp", + created_timestamp_column="created", + # Map original "id" column to "driver_id" join key + field_mapping={"id": "driver_id"}, + ) + + driver = Entity(name="driver", join_keys=["driver_id"]) + driver_fv = FeatureView( + name="driver_stats", + entities=[driver], + schema=[ + Field(name="driver_id", dtype=String), + Field(name="avg_daily_trips", dtype=Int32), + ], + source=driver_stats_data_source, + ) + + store.apply([driver, driver_fv]) + + offline_job = store.get_historical_features( + entity_df=entity_df, + 
features=["driver_stats:avg_daily_trips"], + full_feature_names=False, + ) + + start_time = _utc_now() + actual_df = offline_job.to_df() + + print(f"actual_df shape: {actual_df.shape}") + end_time = _utc_now() + print(str(f"Time to execute job_from_df.to_df() = '{(end_time - start_time)}'\n")) + + assert sorted(expected_df.columns) == sorted(actual_df.columns) + validate_dataframes( + expected_df, + actual_df, + sort_by=["driver_id"], + ) diff --git a/sdk/python/tests/integration/online_store/test_remote_online_store.py b/sdk/python/tests/integration/online_store/test_remote_online_store.py index 2519d3d9bef..eb03fd0c3c5 100644 --- a/sdk/python/tests/integration/online_store/test_remote_online_store.py +++ b/sdk/python/tests/integration/online_store/test_remote_online_store.py @@ -22,8 +22,15 @@ @pytest.mark.integration +@pytest.mark.rbac_remote_integration_test +@pytest.mark.parametrize( + "tls_mode", [("True", "True"), ("True", "False"), ("False", "")], indirect=True +) def test_remote_online_store_read(auth_config, tls_mode): - with tempfile.TemporaryDirectory() as remote_server_tmp_dir, tempfile.TemporaryDirectory() as remote_client_tmp_dir: + with ( + tempfile.TemporaryDirectory() as remote_server_tmp_dir, + tempfile.TemporaryDirectory() as remote_client_tmp_dir, + ): permissions_list = [ Permission( name="online_list_fv_perm", @@ -53,13 +60,13 @@ def test_remote_online_store_read(auth_config, tls_mode): ) ) assert None not in (server_store, server_url, registry_path) - _, _, tls_cert_path = tls_mode + client_store = _create_remote_client_feature_store( temp_dir=remote_client_tmp_dir, server_registry_path=str(registry_path), feature_server_url=server_url, auth_config=auth_config, - tls_cert_path=tls_cert_path, + tls_mode=tls_mode, ) assert client_store is not None _assert_non_existing_entity_feature_views_entity( @@ -169,7 +176,7 @@ def _create_server_store_spin_feature_server( ): store = default_store(str(temp_dir), auth_config, permissions_list) 
feast_server_port = free_port() - is_tls_mode, tls_key_path, tls_cert_path = tls_mode + is_tls_mode, tls_key_path, tls_cert_path, ca_trust_store_path = tls_mode server_url = next( start_feature_server( @@ -177,6 +184,7 @@ def _create_server_store_spin_feature_server( server_port=feast_server_port, tls_key_path=tls_key_path, tls_cert_path=tls_cert_path, + ca_trust_store_path=ca_trust_store_path, ) ) if is_tls_mode: @@ -200,20 +208,33 @@ def _create_remote_client_feature_store( server_registry_path: str, feature_server_url: str, auth_config: str, - tls_cert_path: str = "", + tls_mode, ) -> FeatureStore: project_name = "REMOTE_ONLINE_CLIENT_PROJECT" runner = CliRunner() result = runner.run(["init", project_name], cwd=temp_dir) assert result.returncode == 0 repo_path = os.path.join(temp_dir, project_name, "feature_repo") - _overwrite_remote_client_feature_store_yaml( - repo_path=str(repo_path), - registry_path=server_registry_path, - feature_server_url=feature_server_url, - auth_config=auth_config, - tls_cert_path=tls_cert_path, - ) + is_tls_mode, _, tls_cert_path, ca_trust_store_path = tls_mode + if is_tls_mode and not ca_trust_store_path: + _overwrite_remote_client_feature_store_yaml( + repo_path=str(repo_path), + registry_path=server_registry_path, + feature_server_url=feature_server_url, + auth_config=auth_config, + tls_cert_path=tls_cert_path, + ) + else: + _overwrite_remote_client_feature_store_yaml( + repo_path=str(repo_path), + registry_path=server_registry_path, + feature_server_url=feature_server_url, + auth_config=auth_config, + ) + + if is_tls_mode and ca_trust_store_path: + # configure trust store path only when is_tls_mode and ca_trust_store_path exists. 
+ os.environ["FEAST_CA_CERT_FILE_PATH"] = ca_trust_store_path return FeatureStore(repo_path=repo_path) diff --git a/sdk/python/tests/integration/online_store/test_universal_online.py b/sdk/python/tests/integration/online_store/test_universal_online.py index 4074dcb194e..64122d2c861 100644 --- a/sdk/python/tests/integration/online_store/test_universal_online.py +++ b/sdk/python/tests/integration/online_store/test_universal_online.py @@ -614,6 +614,10 @@ def eventually_apply() -> Tuple[None, bool]: online_features = fs.get_online_features( features=features, entity_rows=entity_rows ).to_dict() + + # Debugging print statement + print("Online features values:", online_features["value"]) + assert all(v is None for v in online_features["value"]) @@ -858,8 +862,8 @@ def assert_feature_service_entity_mapping_correctness( @pytest.mark.integration @pytest.mark.universal_online_stores(only=["pgvector", "elasticsearch", "qdrant"]) -def test_retrieve_online_documents(vectordb_environment, fake_document_data): - fs = vectordb_environment.feature_store +def test_retrieve_online_documents(environment, fake_document_data): + fs = environment.feature_store df, data_source = fake_document_data item_embeddings_feature_view = create_item_embeddings_feature_view(data_source) fs.apply([item_embeddings_feature_view, item()]) @@ -891,3 +895,28 @@ def test_retrieve_online_documents(vectordb_environment, fake_document_data): top_k=2, distance_metric="wrong", ).to_dict() + + +@pytest.mark.integration +@pytest.mark.universal_online_stores(only=["milvus"]) +def test_retrieve_online_milvus_documents(environment, fake_document_data): + fs = environment.feature_store + df, data_source = fake_document_data + item_embeddings_feature_view = create_item_embeddings_feature_view(data_source) + fs.apply([item_embeddings_feature_view, item()]) + fs.write_to_online_store("item_embeddings", df) + documents = fs.retrieve_online_documents( + feature=None, + features=[ + "item_embeddings:embedding_float", + 
"item_embeddings:item_id", + "item_embeddings:string_feature", + ], + query=[1.0, 2.0], + top_k=2, + distance_metric="L2", + ).to_dict() + assert len(documents["embedding_float"]) == 2 + + assert len(documents["item_id"]) == 2 + assert documents["item_id"] == [2, 3] diff --git a/sdk/python/tests/integration/registration/test_universal_registry.py b/sdk/python/tests/integration/registration/test_universal_registry.py index 5e06247ebbb..3819d168d78 100644 --- a/sdk/python/tests/integration/registration/test_universal_registry.py +++ b/sdk/python/tests/integration/registration/test_universal_registry.py @@ -344,7 +344,10 @@ def mock_remote_registry(): marks=pytest.mark.xdist_group(name="mysql_registry"), ), lazy_fixture("sqlite_registry"), - lazy_fixture("mock_remote_registry"), + pytest.param( + lazy_fixture("mock_remote_registry"), + marks=pytest.mark.rbac_remote_integration_test, + ), ] sql_fixtures = [ diff --git a/sdk/python/tests/integration/registration/test_universal_types.py b/sdk/python/tests/integration/registration/test_universal_types.py index 928d05ad31e..9d0b620c083 100644 --- a/sdk/python/tests/integration/registration/test_universal_types.py +++ b/sdk/python/tests/integration/registration/test_universal_types.py @@ -171,9 +171,9 @@ def test_feature_get_online_features_types_match( if config.feature_is_list: for feature in online_features["value"]: assert isinstance(feature, list), "Feature value should be a list" - assert ( - config.has_empty_list or len(feature) > 0 - ), "List of values should not be empty" + assert config.has_empty_list or len(feature) > 0, ( + "List of values should not be empty" + ) for element in feature: assert isinstance(element, expected_dtype) else: @@ -224,7 +224,9 @@ def assert_expected_historical_feature_types( dtype_checkers = feature_dtype_to_expected_historical_feature_dtype[feature_dtype] assert any( check(historical_features_df.dtypes["value"]) for check in dtype_checkers - ), f"Failed to match feature type 
{historical_features_df.dtypes['value']} with checkers {dtype_checkers}" + ), ( + f"Failed to match feature type {historical_features_df.dtypes['value']} with checkers {dtype_checkers}" + ) def assert_feature_list_types( diff --git a/sdk/python/tests/unit/cli/test_cli.py b/sdk/python/tests/unit/cli/test_cli.py index a286c847dd2..b09eabebb80 100644 --- a/sdk/python/tests/unit/cli/test_cli.py +++ b/sdk/python/tests/unit/cli/test_cli.py @@ -170,3 +170,23 @@ def setup_third_party_registry_store_repo( ) yield repo_path + + +def test_cli_configuration(): + """ + Unit test for the 'feast configuration' command + """ + runner = CliRunner() + + with setup_third_party_provider_repo("local") as repo_path: + # Run the 'feast configuration' command + return_code, output = runner.run_with_output(["configuration"], cwd=repo_path) + + # Assertions + assertpy.assert_that(return_code).is_equal_to(0) + assertpy.assert_that(output).contains(b"project: foo") + assertpy.assert_that(output).contains(b"provider: local") + assertpy.assert_that(output).contains(b"type: sqlite") + assertpy.assert_that(output).contains(b"path: data/online_store.db") + assertpy.assert_that(output).contains(b"type: file") + assertpy.assert_that(output).contains(b"entity_key_serialization_version: 2") diff --git a/sdk/python/tests/unit/cli/test_cli_apply_duplicates.py b/sdk/python/tests/unit/cli/test_cli_apply_duplicates.py index e331a1cc2de..b3e350fe73c 100644 --- a/sdk/python/tests/unit/cli/test_cli_apply_duplicates.py +++ b/sdk/python/tests/unit/cli/test_cli_apply_duplicates.py @@ -20,7 +20,10 @@ def test_cli_apply_duplicate_data_source_names() -> None: def run_simple_apply_test(example_repo_file_name: str, expected_error: bytes): - with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name: + with ( + tempfile.TemporaryDirectory() as repo_dir_name, + tempfile.TemporaryDirectory() as data_dir_name, + ): runner = CliRunner() # Construct an example repo in a temporary 
dir repo_path = Path(repo_dir_name) @@ -51,7 +54,10 @@ def test_cli_apply_imported_featureview() -> None: """ Tests that applying a feature view imported from a separate Python file is successful. """ - with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name: + with ( + tempfile.TemporaryDirectory() as repo_dir_name, + tempfile.TemporaryDirectory() as data_dir_name, + ): runner = CliRunner() # Construct an example repo in a temporary dir repo_path = Path(repo_dir_name) @@ -97,7 +103,10 @@ def test_cli_apply_imported_featureview_with_duplication() -> None: Tests that applying feature views with duplicated names is not possible, even if one of the duplicated feature views is imported from another file. """ - with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name: + with ( + tempfile.TemporaryDirectory() as repo_dir_name, + tempfile.TemporaryDirectory() as data_dir_name, + ): runner = CliRunner() # Construct an example repo in a temporary dir repo_path = Path(repo_dir_name) @@ -152,7 +161,10 @@ def test_cli_apply_duplicated_featureview_names_multiple_py_files() -> None: """ Test apply feature views with duplicated names from multiple py files in a feature repo using CLI """ - with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name: + with ( + tempfile.TemporaryDirectory() as repo_dir_name, + tempfile.TemporaryDirectory() as data_dir_name, + ): runner = CliRunner() # Construct an example repo in a temporary dir repo_path = Path(repo_dir_name) diff --git a/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py b/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py index b8f8cc42474..307ba4058c1 100644 --- a/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py +++ 
b/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py @@ -71,6 +71,68 @@ def test_pull_latest_from_table_with_nested_timestamp_or_query(mock_get_spark_se assert retrieval_job.query.strip() == expected_query.strip() +@patch( + "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" +) +def test_pull_latest_from_table_with_nested_timestamp_or_query_and_date_partition_column_set( + mock_get_spark_session, +): + mock_spark_session = MagicMock() + mock_get_spark_session.return_value = mock_spark_session + + test_repo_config = RepoConfig( + project="test_project", + registry="test_registry", + provider="local", + offline_store=SparkOfflineStoreConfig(type="spark"), + ) + + test_data_source = SparkSource( + name="test_nested_batch_source", + description="test_nested_batch_source", + table="offline_store_database_name.offline_store_table_name", + timestamp_field="nested_timestamp", + field_mapping={ + "event_header.event_published_datetime_utc": "nested_timestamp", + }, + date_partition_column="effective_date", + ) + + # Define the parameters for the method + join_key_columns = ["key1", "key2"] + feature_name_columns = ["feature1", "feature2"] + timestamp_field = "event_header.event_published_datetime_utc" + created_timestamp_column = "created_timestamp" + start_date = datetime(2021, 1, 1) + end_date = datetime(2021, 1, 2) + + # Call the method + retrieval_job = SparkOfflineStore.pull_latest_from_table_or_query( + config=test_repo_config, + data_source=test_data_source, + join_key_columns=join_key_columns, + feature_name_columns=feature_name_columns, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + start_date=start_date, + end_date=end_date, + ) + + expected_query = """SELECT + key1, key2, feature1, feature2, nested_timestamp, created_timestamp + + FROM ( + SELECT key1, key2, feature1, feature2, event_header.event_published_datetime_utc AS 
nested_timestamp, created_timestamp, + ROW_NUMBER() OVER(PARTITION BY key1, key2 ORDER BY event_header.event_published_datetime_utc DESC, created_timestamp DESC) AS feast_row_ + FROM `offline_store_database_name`.`offline_store_table_name` t1 + WHERE event_header.event_published_datetime_utc BETWEEN TIMESTAMP('2021-01-01 00:00:00.000000') AND TIMESTAMP('2021-01-02 00:00:00.000000') AND effective_date >= '2021-01-01' AND effective_date <= '2021-01-02' + ) t2 + WHERE feast_row_ = 1""" # noqa: W293, W291 + + assert isinstance(retrieval_job, RetrievalJob) + assert retrieval_job.query.strip() == expected_query.strip() + + @patch( "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" ) @@ -127,3 +189,62 @@ def test_pull_latest_from_table_without_nested_timestamp_or_query( assert isinstance(retrieval_job, RetrievalJob) assert retrieval_job.query.strip() == expected_query.strip() + + +@patch( + "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" +) +def test_pull_latest_from_table_without_nested_timestamp_or_query_and_date_partition_column_set( + mock_get_spark_session, +): + mock_spark_session = MagicMock() + mock_get_spark_session.return_value = mock_spark_session + + test_repo_config = RepoConfig( + project="test_project", + registry="test_registry", + provider="local", + offline_store=SparkOfflineStoreConfig(type="spark"), + ) + + test_data_source = SparkSource( + name="test_batch_source", + description="test_nested_batch_source", + table="offline_store_database_name.offline_store_table_name", + timestamp_field="event_published_datetime_utc", + date_partition_column="effective_date", + ) + + # Define the parameters for the method + join_key_columns = ["key1", "key2"] + feature_name_columns = ["feature1", "feature2"] + timestamp_field = "event_published_datetime_utc" + created_timestamp_column = "created_timestamp" + start_date = datetime(2021, 1, 1) + 
end_date = datetime(2021, 1, 2) + + # Call the method + retrieval_job = SparkOfflineStore.pull_latest_from_table_or_query( + config=test_repo_config, + data_source=test_data_source, + join_key_columns=join_key_columns, + feature_name_columns=feature_name_columns, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + start_date=start_date, + end_date=end_date, + ) + + expected_query = """SELECT + key1, key2, feature1, feature2, event_published_datetime_utc, created_timestamp + + FROM ( + SELECT key1, key2, feature1, feature2, event_published_datetime_utc, created_timestamp, + ROW_NUMBER() OVER(PARTITION BY key1, key2 ORDER BY event_published_datetime_utc DESC, created_timestamp DESC) AS feast_row_ + FROM `offline_store_database_name`.`offline_store_table_name` t1 + WHERE event_published_datetime_utc BETWEEN TIMESTAMP('2021-01-01 00:00:00.000000') AND TIMESTAMP('2021-01-02 00:00:00.000000') AND effective_date >= '2021-01-01' AND effective_date <= '2021-01-02' + ) t2 + WHERE feast_row_ = 1""" # noqa: W293, W291 + + assert isinstance(retrieval_job, RetrievalJob) + assert retrieval_job.query.strip() == expected_query.strip() diff --git a/sdk/python/tests/unit/infra/offline_stores/test_offline_store.py b/sdk/python/tests/unit/infra/offline_stores/test_offline_store.py index afc0e4e5c8f..fe2c437617a 100644 --- a/sdk/python/tests/unit/infra/offline_stores/test_offline_store.py +++ b/sdk/python/tests/unit/infra/offline_stores/test_offline_store.py @@ -21,6 +21,7 @@ TrinoRetrievalJob, ) from feast.infra.offline_stores.dask import DaskRetrievalJob +from feast.infra.offline_stores.file_source import FileSource from feast.infra.offline_stores.offline_store import RetrievalJob, RetrievalMetadata from feast.infra.offline_stores.redshift import ( RedshiftOfflineStoreConfig, @@ -246,3 +247,28 @@ def test_to_arrow_timeout(retrieval_job, timeout: Optional[int]): with patch.object(retrieval_job, "_to_arrow_internal") as mock_to_arrow_internal: 
retrieval_job.to_arrow(timeout=timeout) mock_to_arrow_internal.assert_called_once_with(timeout=timeout) + + +@pytest.mark.parametrize( + "repo_path, uri, expected", + [ + # Remote URI - Should return as-is + ( + "/some/repo", + "s3://bucket-name/file.parquet", + "s3://bucket-name/file.parquet", + ), + # Absolute Path - Should return as-is + ("/some/repo", "/abs/path/file.parquet", "/abs/path/file.parquet"), + # Relative Path with repo_path - Should combine + ("/some/repo", "data/output.parquet", "/some/repo/data/output.parquet"), + # Relative Path without repo_path - Should return absolute path + (None, "C:/path/to/file.parquet", "C:/path/to/file.parquet"), + ], + ids=["s3_uri", "absolute_path", "relative_path", "windows_path"], +) +def test_get_uri_for_file_path( + repo_path: Optional[str], uri: str, expected: str +) -> None: + result = FileSource.get_uri_for_file_path(repo_path=repo_path, uri=uri) + assert result == expected, f"Expected {expected}, but got {result}" diff --git a/sdk/python/tests/unit/infra/offline_stores/test_snowflake.py b/sdk/python/tests/unit/infra/offline_stores/test_snowflake.py index 6e27cba341b..d692d0f957a 100644 --- a/sdk/python/tests/unit/infra/offline_stores/test_snowflake.py +++ b/sdk/python/tests/unit/infra/offline_stores/test_snowflake.py @@ -48,14 +48,17 @@ def retrieval_job(request): def test_to_remote_storage(retrieval_job): stored_files = ["just a path", "maybe another"] - with patch.object( - retrieval_job, "to_snowflake", return_value=None - ) as mock_to_snowflake, patch.object( - retrieval_job, "_get_file_names_from_copy_into", return_value=stored_files - ) as mock_get_file_names_from_copy: - assert ( - retrieval_job.to_remote_storage() == stored_files - ), "should return the list of files" + with ( + patch.object( + retrieval_job, "to_snowflake", return_value=None + ) as mock_to_snowflake, + patch.object( + retrieval_job, "_get_file_names_from_copy_into", return_value=stored_files + ) as mock_get_file_names_from_copy, + ): + 
assert retrieval_job.to_remote_storage() == stored_files, ( + "should return the list of files" + ) mock_to_snowflake.assert_called_once() mock_get_file_names_from_copy.assert_called_once_with(ANY, ANY) native_path = mock_get_file_names_from_copy.call_args[0][1] diff --git a/sdk/python/tests/unit/infra/registry/__init__.py b/sdk/python/tests/unit/infra/registry/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/tests/unit/infra/registry/test_registry.py b/sdk/python/tests/unit/infra/registry/test_registry.py new file mode 100644 index 00000000000..65dea2ff680 --- /dev/null +++ b/sdk/python/tests/unit/infra/registry/test_registry.py @@ -0,0 +1,197 @@ +from datetime import datetime, timedelta, timezone +from unittest.mock import patch + +import pytest + +from feast.infra.registry.caching_registry import CachingRegistry + + +class TestCachingRegistry(CachingRegistry): + """Test subclass that implements abstract methods as no-ops""" + + def _get_any_feature_view(self, *args, **kwargs): + pass + + def _get_data_source(self, *args, **kwargs): + pass + + def _get_entity(self, *args, **kwargs): + pass + + def _get_feature_service(self, *args, **kwargs): + pass + + def _get_feature_view(self, *args, **kwargs): + pass + + def _get_infra(self, *args, **kwargs): + pass + + def _get_on_demand_feature_view(self, *args, **kwargs): + pass + + def _get_permission(self, *args, **kwargs): + pass + + def _get_project(self, *args, **kwargs): + pass + + def _get_saved_dataset(self, *args, **kwargs): + pass + + def _get_stream_feature_view(self, *args, **kwargs): + pass + + def _get_validation_reference(self, *args, **kwargs): + pass + + def _list_all_feature_views(self, *args, **kwargs): + pass + + def _list_data_sources(self, *args, **kwargs): + pass + + def _list_entities(self, *args, **kwargs): + pass + + def _list_feature_services(self, *args, **kwargs): + pass + + def _list_feature_views(self, *args, **kwargs): + pass + + def 
_list_on_demand_feature_views(self, *args, **kwargs): + pass + + def _list_permissions(self, *args, **kwargs): + pass + + def _list_project_metadata(self, *args, **kwargs): + pass + + def _list_projects(self, *args, **kwargs): + pass + + def _list_saved_datasets(self, *args, **kwargs): + pass + + def _list_stream_feature_views(self, *args, **kwargs): + pass + + def _list_validation_references(self, *args, **kwargs): + pass + + def apply_data_source(self, *args, **kwargs): + pass + + def apply_entity(self, *args, **kwargs): + pass + + def apply_feature_service(self, *args, **kwargs): + pass + + def apply_feature_view(self, *args, **kwargs): + pass + + def apply_materialization(self, *args, **kwargs): + pass + + def apply_permission(self, *args, **kwargs): + pass + + def apply_project(self, *args, **kwargs): + pass + + def apply_saved_dataset(self, *args, **kwargs): + pass + + def apply_user_metadata(self, *args, **kwargs): + pass + + def apply_validation_reference(self, *args, **kwargs): + pass + + def commit(self, *args, **kwargs): + pass + + def delete_data_source(self, *args, **kwargs): + pass + + def delete_entity(self, *args, **kwargs): + pass + + def delete_feature_service(self, *args, **kwargs): + pass + + def delete_feature_view(self, *args, **kwargs): + pass + + def delete_permission(self, *args, **kwargs): + pass + + def delete_project(self, *args, **kwargs): + pass + + def delete_validation_reference(self, *args, **kwargs): + pass + + def get_user_metadata(self, *args, **kwargs): + pass + + def proto(self, *args, **kwargs): + pass + + def update_infra(self, *args, **kwargs): + pass + + +@pytest.fixture +def registry(): + """Fixture to create a real instance of CachingRegistry""" + return TestCachingRegistry( + project="test_example", cache_ttl_seconds=2, cache_mode="sync" + ) + + +def test_cache_expiry_triggers_refresh(registry): + """Test that an expired cache triggers a refresh""" + # Set cache creation time to a value that is expired + 
registry.cached_registry_proto = "some_cached_data" + registry.cached_registry_proto_created = datetime.now(timezone.utc) - timedelta( + seconds=5 + ) + + # Mock _refresh_cached_registry_if_necessary to check if it is called + with patch.object( + CachingRegistry, + "_refresh_cached_registry_if_necessary", + wraps=registry._refresh_cached_registry_if_necessary, + ) as mock_refresh_check: + registry._refresh_cached_registry_if_necessary() + mock_refresh_check.assert_called_once() + + # Now check if the refresh was actually triggered + with patch.object( + CachingRegistry, "refresh", wraps=registry.refresh + ) as mock_refresh: + registry._refresh_cached_registry_if_necessary() + mock_refresh.assert_called_once() + + +def test_skip_refresh_if_lock_held(registry): + """Test that refresh is skipped if the lock is already held by another thread""" + registry.cached_registry_proto = "some_cached_data" + registry.cached_registry_proto_created = datetime.now(timezone.utc) - timedelta( + seconds=5 + ) + + # Acquire the lock manually to simulate another thread holding it + registry._refresh_lock.acquire() + with patch.object( + CachingRegistry, "refresh", wraps=registry.refresh + ) as mock_refresh: + registry._refresh_cached_registry_if_necessary() + + # Since the lock was already held, refresh should NOT be called + mock_refresh.assert_not_called() + registry._refresh_lock.release() diff --git a/sdk/python/tests/unit/infra/test_inference_unit_tests.py b/sdk/python/tests/unit/infra/test_inference_unit_tests.py index 54488d43212..951f7033d23 100644 --- a/sdk/python/tests/unit/infra/test_inference_unit_tests.py +++ b/sdk/python/tests/unit/infra/test_inference_unit_tests.py @@ -154,23 +154,6 @@ def python_native_test_invalid_pandas_view( } return output_dict - with pytest.raises(TypeError): - - @on_demand_feature_view( - sources=[date_request], - schema=[ - Field(name="output", dtype=UnixTimestamp), - Field(name="object_output", dtype=String), - ], - mode="python", - ) - def 
python_native_test_invalid_dict_view( - features_df: pd.DataFrame, - ) -> pd.DataFrame: - data = pd.DataFrame() - data["output"] = features_df["some_date"] - return data - def test_datasource_inference(): # Create Feature Views diff --git a/sdk/python/tests/unit/infra/test_key_encoding_utils.py b/sdk/python/tests/unit/infra/test_key_encoding_utils.py index 658c04c358b..cad5e27f367 100644 --- a/sdk/python/tests/unit/infra/test_key_encoding_utils.py +++ b/sdk/python/tests/unit/infra/test_key_encoding_utils.py @@ -52,6 +52,24 @@ def test_deserialize_entity_key(): ) +def test_deserialize_multiple_entity_keys(): + entity_key_proto = EntityKeyProto( + join_keys=["customer", "user"], + entity_values=[ValueProto(string_val="test"), ValueProto(int64_val=int(2**15))], + ) + + serialized_entity_key = serialize_entity_key( + entity_key_proto, + entity_key_serialization_version=3, + ) + + deserialized_entity_key = deserialize_entity_key( + serialized_entity_key, + entity_key_serialization_version=3, + ) + assert deserialized_entity_key == entity_key_proto + + def test_serialize_value(): v, t = _serialize_val("string_val", ValueProto(string_val="test")) assert t == ValueType.STRING diff --git a/sdk/python/tests/unit/infra/utils/snowflake/test_snowflake_utils.py b/sdk/python/tests/unit/infra/utils/snowflake/test_snowflake_utils.py new file mode 100644 index 00000000000..8ae6ec63ba5 --- /dev/null +++ b/sdk/python/tests/unit/infra/utils/snowflake/test_snowflake_utils.py @@ -0,0 +1,71 @@ +import tempfile +from typing import Optional + +import pytest +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa + +from feast.infra.utils.snowflake.snowflake_utils import parse_private_key_path + +PRIVATE_KEY_PASSPHRASE = "test" + + +def _pem_private_key(passphrase: Optional[str]): + private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + return private_key.private_bytes( + encoding=serialization.Encoding.PEM, 
+ format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=( + serialization.BestAvailableEncryption(passphrase.encode()) + if passphrase + else serialization.NoEncryption() + ), + ) + + +@pytest.fixture +def unencrypted_private_key(): + return _pem_private_key(None) + + +@pytest.fixture +def encrypted_private_key(): + return _pem_private_key(PRIVATE_KEY_PASSPHRASE) + + +def test_parse_private_key_path_key_content_unencrypted(unencrypted_private_key): + parse_private_key_path( + None, + None, + unencrypted_private_key, + ) + + +def test_parse_private_key_path_key_content_encrypted(encrypted_private_key): + parse_private_key_path( + PRIVATE_KEY_PASSPHRASE, + None, + encrypted_private_key, + ) + + +def test_parse_private_key_path_key_path_unencrypted(unencrypted_private_key): + with tempfile.NamedTemporaryFile(mode="wb") as f: + f.write(unencrypted_private_key) + f.flush() + parse_private_key_path( + None, + f.name, + None, + ) + + +def test_parse_private_key_path_key_path_encrypted(encrypted_private_key): + with tempfile.NamedTemporaryFile(mode="wb") as f: + f.write(encrypted_private_key) + f.flush() + parse_private_key_path( + PRIVATE_KEY_PASSPHRASE, + f.name, + None, + ) diff --git a/sdk/python/tests/unit/local_feast_tests/test_local_feature_store.py b/sdk/python/tests/unit/local_feast_tests/test_local_feature_store.py index 1b9b48d8d0a..931acfb3919 100644 --- a/sdk/python/tests/unit/local_feast_tests/test_local_feature_store.py +++ b/sdk/python/tests/unit/local_feast_tests/test_local_feature_store.py @@ -93,6 +93,7 @@ def test_apply_feature_view(test_feature_store): Field(name="fs1_my_feature_4", dtype=Array(Bytes)), Field(name="entity_id", dtype=Int64), ], + udf=lambda df: df, entities=[entity], tags={"team": "matchmaking", "tag": "two"}, source=batch_source, @@ -654,7 +655,7 @@ def pandas_view(pandas_df): import pandas as pd assert type(pandas_df) == pd.DataFrame - df = pandas_df.transform(lambda x: x + 10, axis=1) + df = 
pandas_df.transform(lambda x: x + 10) df.insert(2, "C", [20.2, 230.0, 34.0], True) return df diff --git a/sdk/python/tests/unit/online_store/test_online_retrieval.py b/sdk/python/tests/unit/online_store/test_online_retrieval.py index 83184643f35..ea76ed6f544 100644 --- a/sdk/python/tests/unit/online_store/test_online_retrieval.py +++ b/sdk/python/tests/unit/online_store/test_online_retrieval.py @@ -1,5 +1,6 @@ import os import platform +import random import sqlite3 import sys import time @@ -16,6 +17,7 @@ from feast.protos.feast.types.Value_pb2 import FloatList as FloatListProto from feast.protos.feast.types.Value_pb2 import Value as ValueProto from feast.repo_config import RegistryConfig +from feast.types import ValueType from feast.utils import _utc_now from tests.integration.feature_repos.universal.feature_views import TAGS from tests.utils.cli_repo_creator import CliRunner, get_example_repo @@ -27,7 +29,8 @@ def test_get_online_features() -> None: """ runner = CliRunner() with runner.local_repo( - get_example_repo("example_feature_repo_1.py"), "file" + get_example_repo("example_feature_repo_1.py"), + "file", ) as store: # Write some data to two tables driver_locations_fv = store.get_feature_view(name="driver_locations") @@ -134,6 +137,21 @@ def test_get_online_features() -> None: assert "trips" in result + with pytest.raises(KeyError) as excinfo: + _ = store.get_online_features( + features=["driver_locations:lon"], + entity_rows=[{"customer_id": 0}], + full_feature_names=False, + ).to_dict() + + error_message = str(excinfo.value) + assert "Missing join key values for keys:" in error_message + assert ( + "Missing join key values for keys: ['customer_id', 'driver_id', 'item_id']." 
+ in error_message + ) + assert "Provided join_key_values: ['customer_id']" in error_message + result = store.get_online_features( features=["customer_profile_pandas_odfv:on_demand_age"], entity_rows=[{"driver_id": 1, "customer_id": "5"}], @@ -272,6 +290,178 @@ def test_get_online_features() -> None: os.rename(store.config.registry.path + "_fake", store.config.registry.path) +def test_get_online_features_milvus() -> None: + """ + Test reading from the online store in local mode. + """ + runner = CliRunner() + with runner.local_repo( + get_example_repo("example_feature_repo_1.py"), + offline_store="file", + online_store="milvus", + apply=False, + teardown=False, + ) as store: + from tests.example_repos.example_feature_repo_1 import ( + all_drivers_feature_service, + customer, + customer_driver_combined, + customer_driver_combined_source, + customer_profile, + customer_profile_pandas_odfv, + customer_profile_source, + driver, + driver_locations, + driver_locations_source, + item, + pushed_driver_locations, + rag_documents_source, + ) + + store.apply( + [ + driver_locations_source, + customer_profile_source, + customer_driver_combined_source, + rag_documents_source, + driver, + customer, + item, + driver_locations, + pushed_driver_locations, + customer_profile, + customer_driver_combined, + # document_embeddings, + customer_profile_pandas_odfv, + all_drivers_feature_service, + ] + ) + + # Write some data to two tables + driver_locations_fv = store.get_feature_view(name="driver_locations") + customer_profile_fv = store.get_feature_view(name="customer_profile") + customer_driver_combined_fv = store.get_feature_view( + name="customer_driver_combined" + ) + + provider = store._get_provider() + + driver_key = EntityKeyProto( + join_keys=["driver_id"], entity_values=[ValueProto(int64_val=1)] + ) + provider.online_write_batch( + config=store.config, + table=driver_locations_fv, + data=[ + ( + driver_key, + { + "lat": ValueProto(double_val=0.1), + "lon": 
ValueProto(string_val="1.0"), + }, + _utc_now(), + _utc_now(), + ) + ], + progress=None, + ) + + customer_key = EntityKeyProto( + join_keys=["customer_id"], entity_values=[ValueProto(string_val="5")] + ) + provider.online_write_batch( + config=store.config, + table=customer_profile_fv, + data=[ + ( + customer_key, + { + "avg_orders_day": ValueProto(float_val=1.0), + "name": ValueProto(string_val="John"), + "age": ValueProto(int64_val=3), + }, + _utc_now(), + _utc_now(), + ) + ], + progress=None, + ) + + customer_key = EntityKeyProto( + join_keys=["customer_id", "driver_id"], + entity_values=[ValueProto(string_val="5"), ValueProto(int64_val=1)], + ) + provider.online_write_batch( + config=store.config, + table=customer_driver_combined_fv, + data=[ + ( + customer_key, + {"trips": ValueProto(int64_val=7)}, + _utc_now(), + _utc_now(), + ) + ], + progress=None, + ) + + assert len(store.list_entities()) == 3 + assert len(store.list_entities(tags=TAGS)) == 2 + + # Retrieve two features using two keys, one valid one non-existing + result = store.get_online_features( + features=[ + "driver_locations:lon", + "customer_profile:avg_orders_day", + "customer_profile:name", + "customer_driver_combined:trips", + ], + entity_rows=[ + {"driver_id": 1, "customer_id": "5"}, + {"driver_id": 1, "customer_id": 5}, + ], + full_feature_names=False, + ).to_dict() + + assert "lon" in result + assert "avg_orders_day" in result + assert "name" in result + assert result["driver_id"] == [1, 1] + assert result["customer_id"] == ["5", "5"] + assert result["lon"] == ["1.0", "1.0"] + assert result["avg_orders_day"] == [1.0, 1.0] + assert result["name"] == ["John", "John"] + assert result["trips"] == [7, 7] + + # Ensure features are still in result when keys not found + result = store.get_online_features( + features=["customer_driver_combined:trips"], + entity_rows=[{"driver_id": 0, "customer_id": 0}], + full_feature_names=False, + ).to_dict() + + assert "trips" in result + + result = 
store.get_online_features( + features=["customer_profile_pandas_odfv:on_demand_age"], + entity_rows=[{"driver_id": 1, "customer_id": "5"}], + full_feature_names=False, + ).to_dict() + + assert "on_demand_age" in result + assert result["driver_id"] == [1] + assert result["customer_id"] == ["5"] + assert result["on_demand_age"] == [4] + + # invalid table reference + with pytest.raises(FeatureViewNotFoundException): + store.get_online_features( + features=["driver_locations_bad:lon"], + entity_rows=[{"driver_id": 1}], + full_feature_names=False, + ) + + def test_online_to_df(): """ Test dataframe conversion. Make sure the response columns and rows are @@ -450,12 +640,12 @@ def test_sqlite_get_online_documents() -> None: item_keys = [ EntityKeyProto( - join_keys=["item_id"], entity_values=[ValueProto(int64_val=i)] + join_keys=["item_id"], entity_values=[ValueProto(string_val=str(i))] ) for i in range(n) ] data = [] - for item_key in item_keys: + for i, item_key in enumerate(item_keys): data.append( ( item_key, @@ -466,19 +656,17 @@ def test_sqlite_get_online_documents() -> None: vector_length, ) ) - ) + ), + "content": ValueProto( + string_val=f"the {i}th sentence with some text" + ), + "title": ValueProto(string_val=f"Title {i}"), }, _utc_now(), _utc_now(), ) ) - provider.online_write_batch( - config=store.config, - table=document_embeddings_fv, - data=data, - progress=None, - ) documents_df = pd.DataFrame( { "item_id": [str(i) for i in range(n)], @@ -488,26 +676,42 @@ def test_sqlite_get_online_documents() -> None: ) for i in range(n) ], + "content": [f"the {i}th sentence with some text" for i in range(n)], + "title": [f"Title {i}" for i in range(n)], "event_timestamp": [_utc_now() for _ in range(n)], } ) - store.write_to_online_store( - feature_view_name="document_embeddings", - df=documents_df, + print(len(data), documents_df.shape[0]) + provider.online_write_batch( + config=store.config, + table=document_embeddings_fv, + data=data, + progress=None, ) - 
document_table = store._provider._online_store._conn.execute( "SELECT name FROM sqlite_master WHERE type='table' and name like '%_document_embeddings';" ).fetchall() + assert len(document_table) == 1 document_table_name = document_table[0][0] + record_count = len( store._provider._online_store._conn.execute( f"select * from {document_table_name}" ).fetchall() ) - assert record_count == len(data) + documents_df.shape[0] + assert record_count == len(data) * len(document_embeddings_fv.features) + store.write_to_online_store( + feature_view_name="document_embeddings", + df=documents_df, + ) + record_count = len( + store._provider._online_store._conn.execute( + f"select * from {document_table_name}" + ).fetchall() + ) + assert record_count == len(data) * len(document_embeddings_fv.features) query_embedding = np.random.random( vector_length, @@ -561,3 +765,526 @@ def test_sqlite_vec_import() -> None: """).fetchall() result = [(rowid, round(distance, 2)) for rowid, distance in result] assert result == [(2, 2.39), (1, 2.39)] + + +def test_sqlite_hybrid_search() -> None: + imdb_sample_data = { + "Rank": {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}, + "Title": { + 0: "Guardians of the Galaxy", + 1: "Prometheus", + 2: "Split", + 3: "Sing", + 4: "Suicide Squad", + }, + "Genre": { + 0: "Action,Adventure,Sci-Fi", + 1: "Adventure,Mystery,Sci-Fi", + 2: "Horror,Thriller", + 3: "Animation,Comedy,Family", + 4: "Action,Adventure,Fantasy", + }, + "Description": { + 0: "A group of intergalactic criminals are forced to work together to stop a fanatical warrior from taking control of the universe.", + 1: "Following clues to the origin of mankind, a team finds a structure on a distant moon, but they soon realize they are not alone.", + 2: "Three girls are kidnapped by a man with a diagnosed 23 distinct personalities. 
They must try to escape before the apparent emergence of a frightful new 24th.", + 3: "In a city of humanoid animals, a hustling theater impresario's attempt to save his theater with a singing competition becomes grander than he anticipates even as its finalists' find that their lives will never be the same.", + 4: "A secret government agency recruits some of the most dangerous incarcerated super-villains to form a defensive task force. Their first mission: save the world from the apocalypse.", + }, + "Director": { + 0: "James Gunn", + 1: "Ridley Scott", + 2: "M. Night Shyamalan", + 3: "Christophe Lourdelet", + 4: "David Ayer", + }, + "Actors": { + 0: "Chris Pratt, Vin Diesel, Bradley Cooper, Zoe Saldana", + 1: "Noomi Rapace, Logan Marshall-Green, Michael Fassbender, Charlize Theron", + 2: "James McAvoy, Anya Taylor-Joy, Haley Lu Richardson, Jessica Sula", + 3: "Matthew McConaughey,Reese Witherspoon, Seth MacFarlane, Scarlett Johansson", + 4: "Will Smith, Jared Leto, Margot Robbie, Viola Davis", + }, + "Year": {0: 2014, 1: 2012, 2: 2016, 3: 2016, 4: 2016}, + "Runtime (Minutes)": {0: 121, 1: 124, 2: 117, 3: 108, 4: 123}, + "Rating": {0: 8.1, 1: 7.0, 2: 7.3, 3: 7.2, 4: 6.2}, + "Votes": {0: 757074, 1: 485820, 2: 157606, 3: 60545, 4: 393727}, + "Revenue (Millions)": {0: 333.13, 1: 126.46, 2: 138.12, 3: 270.32, 4: 325.02}, + "Metascore": {0: 76.0, 1: 65.0, 2: 62.0, 3: 59.0, 4: 40.0}, + } + df = pd.DataFrame(imdb_sample_data) + db = sqlite3.connect(":memory:") + + cur = db.cursor() + + cur.execute( + 'create virtual table imdb using fts5(title, description, genre, rating, tokenize="porter unicode61");' + ) + cur.executemany( + "insert into imdb (title, description, genre, rating) values (?,?,?,?);", + df[["Title", "Description", "Genre", "Rating"]].to_records(index=False), + ) + db.commit() + + query = "Prom" + res = cur.execute(f"""select title, description, genre, rating, rank + from imdb + where title MATCH "{query}*" + ORDER BY rank + limit 5""").fetchall() + assert 
len(res) == 1 + assert res[0][0] == "Prometheus" + + q = "(title : the OR of) AND (genre: Action OR Comedy)" + res_df = pd.read_sql_query( + f""" + select + rowid, + title, + description, + bm25(imdb, 10.0, 5.0) + from imdb + where imdb MATCH "{q}" + ORDER BY bm25(imdb, 10.0, 5.0) + limit 5 + """, + db, + ) + res_df["rowid"].tolist() == [1, 4, 5] + res_df["title"].tolist() == ["Guardians of the Galaxy", "Sing", "Suicide Squad"] + + +@pytest.mark.skipif( + sys.version_info[0:2] != (3, 10), + reason="Only works on Python 3.10", +) +def test_sqlite_get_online_documents_v2() -> None: + """Test retrieving documents using v2 method with vector similarity search.""" + n = 10 + vector_length = 8 + runner = CliRunner() + with runner.local_repo( + get_example_repo("example_feature_repo_1.py"), "file" + ) as store: + store.config.online_store.vector_enabled = True + store.config.online_store.vector_len = vector_length + store.config.entity_key_serialization_version = 3 + document_embeddings_fv = store.get_feature_view(name="document_embeddings") + + provider = store._get_provider() + + # Create test data + item_keys = [ + EntityKeyProto( + join_keys=["item_id"], entity_values=[ValueProto(int64_val=i)] + ) + for i in range(n) + ] + data = [] + for i, item_key in enumerate(item_keys): + data.append( + ( + item_key, + { + "Embeddings": ValueProto( + float_list_val=FloatListProto( + val=[float(x) for x in np.random.random(vector_length)] + ) + ), + "content": ValueProto( + string_val=f"the {i}th sentence with some text" + ), + "title": ValueProto(string_val=f"Title {i}"), + }, + _utc_now(), + _utc_now(), + ) + ) + + provider.online_write_batch( + config=store.config, + table=document_embeddings_fv, + data=data, + progress=None, + ) + + # Test vector similarity search + query_embedding = [float(x) for x in np.random.random(vector_length)] + result = store.retrieve_online_documents_v2( + features=[ + "document_embeddings:Embeddings", + "document_embeddings:content", + 
"document_embeddings:title", + ], + query=query_embedding, + top_k=3, + ).to_dict() + + assert "Embeddings" in result + assert "content" in result + assert "title" in result + assert "distance" in result + assert ["1th sentence with some text" in r for r in result["content"]] + assert ["Title " in r for r in result["title"]] + assert len(result["distance"]) == 3 + + +def test_sqlite_get_online_documents_v2_search() -> None: + """Test retrieving documents using v2 method with key word search""" + n = 10 + vector_length = 8 + runner = CliRunner() + with runner.local_repo( + get_example_repo("example_feature_repo_1.py"), "file" + ) as store: + store.config.online_store.text_search_enabled = True + store.config.entity_key_serialization_version = 3 + document_embeddings_fv = store.get_feature_view(name="document_embeddings") + + provider = store._get_provider() + + # Create test data + item_keys = [ + EntityKeyProto( + join_keys=["item_id"], entity_values=[ValueProto(int64_val=i)] + ) + for i in range(n) + ] + data = [] + for i, item_key in enumerate(item_keys): + data.append( + ( + item_key, + { + "Embeddings": ValueProto( + float_list_val=FloatListProto( + val=[float(x) for x in np.random.random(vector_length)] + ) + ), + "content": ValueProto( + string_val=f"the {i}th sentence with some text" + ), + "title": ValueProto(string_val=f"Title {i}"), + }, + _utc_now(), + _utc_now(), + ) + ) + + provider.online_write_batch( + config=store.config, + table=document_embeddings_fv, + data=data, + progress=None, + ) + + # Test vector similarity search + # query_embedding = [float(x) for x in np.random.random(vector_length)] + result = store.retrieve_online_documents_v2( + features=[ + "document_embeddings:Embeddings", + "document_embeddings:content", + "document_embeddings:title", + ], + query_string="(content: 5) OR (title: 1) OR (title: 3)", + top_k=3, + ).to_dict() + + assert "Embeddings" in result + assert "content" in result + assert "title" in result + assert "distance" in 
result + assert ["1th sentence with some text" in r for r in result["content"]] + assert ["Title " in r for r in result["title"]] + assert len(result["distance"]) == 2 + assert result["distance"] == [-1.8458267450332642, -1.8458267450332642] + + +@pytest.mark.skip(reason="Skipping this test as CI struggles with it") +def test_local_milvus() -> None: + import random + + from pymilvus import MilvusClient + + random.seed(42) + VECTOR_LENGTH: int = 768 + COLLECTION_NAME: str = "test_demo_collection" + + client = MilvusClient("./milvus_demo.db") + + for collection in client.list_collections(): + client.drop_collection(collection_name=collection) + client.create_collection( + collection_name=COLLECTION_NAME, + dimension=VECTOR_LENGTH, + ) + assert client.list_collections() == [COLLECTION_NAME] + + docs = [ + "Artificial intelligence was founded as an academic discipline in 1956.", + "Alan Turing was the first person to conduct substantial research in AI.", + "Born in Maida Vale, London, Turing was raised in southern England.", + ] + # Use fake representation with random vectors (vector_length dimension). 
+ vectors = [[random.uniform(-1, 1) for _ in range(VECTOR_LENGTH)] for _ in docs] + data = [ + {"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"} + for i in range(len(vectors)) + ] + + print("Data has", len(data), "entities, each with fields: ", data[0].keys()) + print("Vector dim:", len(data[0]["vector"])) + + insert_res = client.insert(collection_name=COLLECTION_NAME, data=data) + assert insert_res == {"insert_count": 3, "ids": [0, 1, 2], "cost": 0} + + query_vectors = [[random.uniform(-1, 1) for _ in range(VECTOR_LENGTH)]] + + search_res = client.search( + collection_name=COLLECTION_NAME, # target collection + data=query_vectors, # query vectors + limit=2, # number of returned entities + output_fields=["text", "subject"], # specifies fields to be returned + ) + assert [j["id"] for j in search_res[0]] == [0, 1] + query_result = client.query( + collection_name=COLLECTION_NAME, + filter="id == 0", + ) + assert list(query_result[0].keys()) == ["id", "text", "subject", "vector"] + + client.drop_collection(collection_name=COLLECTION_NAME) + + +def test_milvus_lite_get_online_documents_v2() -> None: + """ + Test retrieving documents from the online store in local mode. + """ + + random.seed(42) + n = 10 # number of samples - note: we'll actually double it + vector_length = 10 + runner = CliRunner() + with runner.local_repo( + example_repo_py=get_example_repo("example_rag_feature_repo.py"), + offline_store="file", + online_store="milvus", + apply=False, + teardown=False, + ) as store: + from datetime import timedelta + + from feast import Entity, FeatureView, Field, FileSource + from feast.types import Array, Float32, Int64, String, UnixTimestamp + + # This is for Milvus + # Note that file source paths are not validated, so there doesn't actually need to be any data + # at the paths for these file sources. Since these paths are effectively fake, this example + # feature repo should not be used for historical retrieval. 
+ + rag_documents_source = FileSource( + path="data/embedded_documents.parquet", + timestamp_field="event_timestamp", + created_timestamp_column="created_timestamp", + ) + + item = Entity( + name="item_id", # The name is derived from this argument, not object name. + join_keys=["item_id"], + value_type=ValueType.INT64, + ) + author = Entity( + name="author_id", + join_keys=["author_id"], + value_type=ValueType.STRING, + ) + + document_embeddings = FeatureView( + name="embedded_documents", + entities=[item, author], + schema=[ + Field( + name="vector", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="COSINE", + ), + Field(name="item_id", dtype=Int64), + Field(name="author_id", dtype=String), + Field(name="created_timestamp", dtype=UnixTimestamp), + Field(name="sentence_chunks", dtype=String), + Field(name="event_timestamp", dtype=UnixTimestamp), + ], + source=rag_documents_source, + ttl=timedelta(hours=24), + ) + + store.apply([rag_documents_source, item, document_embeddings]) + + # Write some data to two tables + document_embeddings_fv = store.get_feature_view(name="embedded_documents") + + provider = store._get_provider() + + item_keys = [ + EntityKeyProto( + join_keys=["item_id", "author_id"], + entity_values=[ + ValueProto(int64_val=i), + ValueProto(string_val=f"author_{i}"), + ], + ) + for i in range(n) + ] + data = [] + for i, item_key in enumerate(item_keys): + data.append( + ( + item_key, + { + "vector": ValueProto( + float_list_val=FloatListProto( + val=np.random.random( + vector_length, + ) + + i + ) + ), + "sentence_chunks": ValueProto(string_val=f"sentence chunk {i}"), + }, + _utc_now(), + _utc_now(), + ) + ) + + provider.online_write_batch( + config=store.config, + table=document_embeddings_fv, + data=data, + progress=None, + ) + documents_df = pd.DataFrame( + { + "item_id": [str(i) for i in range(n)], + "author_id": [f"author_{i}" for i in range(n)], + "vector": [ + np.random.random( + vector_length, + ) + + i + for i in range(n) + 
], + "sentence_chunks": [f"sentence chunk {i}" for i in range(n)], + "event_timestamp": [_utc_now() for _ in range(n)], + "created_timestamp": [_utc_now() for _ in range(n)], + } + ) + + store.write_to_online_store( + feature_view_name="embedded_documents", + df=documents_df, + ) + + query_embedding = np.random.random( + vector_length, + ) + + client = store._provider._online_store.client + collection_name = client.list_collections()[0] + search_params = { + "metric_type": "COSINE", + "params": {"nprobe": 10}, + } + + results = client.search( + collection_name=collection_name, + data=[query_embedding], + anns_field="vector", + search_params=search_params, + limit=3, + output_fields=[ + "item_id", + "author_id", + "sentence_chunks", + "created_ts", + "event_ts", + ], + ) + result = store.retrieve_online_documents_v2( + features=[ + "embedded_documents:vector", + "embedded_documents:item_id", + "embedded_documents:author_id", + "embedded_documents:sentence_chunks", + ], + query=query_embedding, + top_k=3, + ).to_dict() + + for k in ["vector", "item_id", "author_id", "sentence_chunks", "distance"]: + assert k in result, f"Missing {k} in retrieve_online_documents response" + assert len(result["distance"]) == len(results[0]) + + +def test_milvus_native_from_feast_data() -> None: + import random + from datetime import datetime + + import numpy as np + from pymilvus import MilvusClient + + random.seed(42) + VECTOR_LENGTH = 10 # Matches vector_length from the Feast example + COLLECTION_NAME = "embedded_documents" + + # Initialize Milvus client with local setup + client = MilvusClient("./milvus_demo.db") + + # Clear and recreate collection + for collection in client.list_collections(): + client.drop_collection(collection_name=collection) + client.create_collection( + collection_name=COLLECTION_NAME, + dimension=VECTOR_LENGTH, + metric_type="COSINE", # Matches Feast's vector_search_metric + ) + assert client.list_collections() == [COLLECTION_NAME] + + # Prepare data for 
insertion, similar to the Feast example + n = 10 # Number of items + data = [] + for i in range(n): + vector = (np.random.random(VECTOR_LENGTH) + i).tolist() + data.append( + { + "id": i, + "vector": vector, + "item_id": i, + "author_id": f"author_{i}", + "sentence_chunks": f"sentence chunk {i}", + "event_timestamp": datetime.utcnow().isoformat(), + "created_timestamp": datetime.utcnow().isoformat(), + } + ) + + print("Data has", len(data), "entities, each with fields:", data[0].keys()) + + # Insert data into Milvus + insert_res = client.insert(collection_name=COLLECTION_NAME, data=data) + assert insert_res == {"insert_count": n, "ids": list(range(n)), "cost": 0} + + # Perform a vector search using a random query embedding + query_embedding = (np.random.random(VECTOR_LENGTH)).tolist() + search_res = client.search( + collection_name=COLLECTION_NAME, + data=[query_embedding], + limit=5, # Top 5 results + output_fields=["item_id", "author_id", "sentence_chunks"], + ) + + # Validate the search results + assert len(search_res[0]) == 5 + print("Search Results:", search_res[0]) + + # Clean up the collection + client.drop_collection(collection_name=COLLECTION_NAME) diff --git a/sdk/python/tests/unit/permissions/auth/server/test_auth_registry_server.py b/sdk/python/tests/unit/permissions/auth/server/test_auth_registry_server.py index 25c5fe3eb8c..0395f995410 100644 --- a/sdk/python/tests/unit/permissions/auth/server/test_auth_registry_server.py +++ b/sdk/python/tests/unit/permissions/auth/server/test_auth_registry_server.py @@ -44,7 +44,7 @@ def start_registry_server( assertpy.assert_that(server_port).is_not_equal_to(0) - is_tls_mode, tls_key_path, tls_cert_path = tls_mode + is_tls_mode, tls_key_path, tls_cert_path, tls_ca_file_path = tls_mode if is_tls_mode: print(f"Starting Registry in TLS mode at {server_port}") server = start_server( @@ -74,6 +74,9 @@ def start_registry_server( server.stop(grace=None) # Teardown server +@pytest.mark.parametrize( + "tls_mode", [("True", 
"True"), ("True", "False"), ("False", "")], indirect=True +) def test_registry_apis( auth_config, tls_mode, diff --git a/sdk/python/tests/unit/permissions/test_oidc_auth_client.py b/sdk/python/tests/unit/permissions/test_oidc_auth_client.py index 68aec70fc79..3d74eb2a55f 100644 --- a/sdk/python/tests/unit/permissions/test_oidc_auth_client.py +++ b/sdk/python/tests/unit/permissions/test_oidc_auth_client.py @@ -58,6 +58,6 @@ def _assert_auth_requests_session( "Authorization header is missing in object of class: " "AuthenticatedRequestsSession " ) - assert ( - auth_req_session.headers["Authorization"] == f"Bearer {expected_token}" - ), "Authorization token is incorrect" + assert auth_req_session.headers["Authorization"] == f"Bearer {expected_token}", ( + "Authorization token is incorrect" + ) diff --git a/sdk/python/tests/unit/test_entity.py b/sdk/python/tests/unit/test_entity.py index 78f71231049..b36f363a6ff 100644 --- a/sdk/python/tests/unit/test_entity.py +++ b/sdk/python/tests/unit/test_entity.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import warnings + import assertpy import pytest @@ -73,3 +75,16 @@ def test_hash(): s4 = {entity1, entity2, entity3, entity4} assert len(s4) == 3 + + +def test_entity_without_value_type_warns(): + with pytest.warns(DeprecationWarning, match="Entity value_type will be mandatory"): + entity = Entity(name="my-entity") + assert entity.value_type == ValueType.UNKNOWN + + +def test_entity_with_value_type_no_warning(): + with warnings.catch_warnings(): + warnings.simplefilter("error") + entity = Entity(name="my-entity", value_type=ValueType.STRING) + assert entity.value_type == ValueType.STRING diff --git a/sdk/python/tests/unit/test_feature_views.py b/sdk/python/tests/unit/test_feature_views.py index ce789c706c5..911c94ff34c 100644 --- a/sdk/python/tests/unit/test_feature_views.py +++ b/sdk/python/tests/unit/test_feature_views.py @@ -36,6 +36,8 @@ def test_create_batch_feature_view(): entities=[], ttl=timedelta(days=30), source=batch_source, + mode="python", + udf=lambda x: x, ) with pytest.raises(TypeError): @@ -54,6 +56,8 @@ def test_create_batch_feature_view(): with pytest.raises(ValueError): BatchFeatureView( name="test batch feature view", + mode="python", + udf=lambda x: x, entities=[], ttl=timedelta(days=30), source=stream_source, diff --git a/sdk/python/tests/unit/test_on_demand_feature_view.py b/sdk/python/tests/unit/test_on_demand_feature_view.py index 4b30bd6be99..505146aa612 100644 --- a/sdk/python/tests/unit/test_on_demand_feature_view.py +++ b/sdk/python/tests/unit/test_on_demand_feature_view.py @@ -24,6 +24,7 @@ OnDemandFeatureView, PandasTransformation, PythonTransformation, + on_demand_feature_view, ) from feast.types import Float32 @@ -148,7 +149,7 @@ def test_hash(): assert len(s4) == 3 assert on_demand_feature_view_5.feature_transformation == PandasTransformation( - udf2, "udf2 source code" + udf2, udf_string="udf2 source code" ) @@ -179,26 +180,25 @@ def test_python_native_transformation_mode(): mode="python", ) - 
on_demand_feature_view_python_native_err = OnDemandFeatureView( - name="my-on-demand-feature-view", - sources=sources, - schema=[ - Field(name="output1", dtype=Float32), - Field(name="output2", dtype=Float32), - ], - feature_transformation=PandasTransformation( - udf=python_native_udf, udf_string="python native udf source code" - ), - description="test", - mode="python", - ) - assert ( on_demand_feature_view_python_native.feature_transformation == PythonTransformation(python_native_udf, "python native udf source code") ) with pytest.raises(TypeError): + on_demand_feature_view_python_native_err = OnDemandFeatureView( + name="my-on-demand-feature-view", + sources=sources, + schema=[ + Field(name="output1", dtype=Float32), + Field(name="output2", dtype=Float32), + ], + feature_transformation=PandasTransformation( + udf=python_native_udf, udf_string="python native udf source code" + ), + description="test", + mode="python", + ) assert ( on_demand_feature_view_python_native_err.feature_transformation == PythonTransformation(python_native_udf, "python native udf source code") @@ -356,3 +356,65 @@ def test_on_demand_feature_view_stored_writes(): assert transformed_output["output3"] is not None and isinstance( transformed_output["output3"], datetime.datetime ) + + +def test_function_call_syntax(): + CUSTOM_FUNCTION_NAME = "custom-function-name" + file_source = FileSource(name="my-file-source", path="test.parquet") + feature_view = FeatureView( + name="my-feature-view", + entities=[], + schema=[ + Field(name="feature1", dtype=Float32), + Field(name="feature2", dtype=Float32), + ], + source=file_source, + ) + sources = [feature_view] + + def transform_features(features_df: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["output1"] = features_df["feature1"] + df["output2"] = features_df["feature2"] + return df + + odfv = on_demand_feature_view( + sources=sources, + schema=[ + Field(name="output1", dtype=Float32), + Field(name="output2", dtype=Float32), + ], + 
)(transform_features) + + assert odfv.name == transform_features.__name__ + assert isinstance(odfv, OnDemandFeatureView) + + proto = odfv.to_proto() + assert proto.spec.name == transform_features.__name__ + + deserialized = OnDemandFeatureView.from_proto(proto) + assert deserialized.name == transform_features.__name__ + + def another_transform(features_df: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["output1"] = features_df["feature1"] + df["output2"] = features_df["feature2"] + return df + + odfv_custom = on_demand_feature_view( + name=CUSTOM_FUNCTION_NAME, + sources=sources, + schema=[ + Field(name="output1", dtype=Float32), + Field(name="output2", dtype=Float32), + ], + )(another_transform) + + assert odfv_custom.name == CUSTOM_FUNCTION_NAME + assert isinstance(odfv_custom, OnDemandFeatureView) + + proto = odfv_custom.to_proto() + assert proto.spec.name == CUSTOM_FUNCTION_NAME + + deserialized = OnDemandFeatureView.from_proto(proto) + assert deserialized.name == CUSTOM_FUNCTION_NAME diff --git a/sdk/python/tests/unit/test_on_demand_python_transformation.py b/sdk/python/tests/unit/test_on_demand_python_transformation.py index a0c33fadfda..7ae9f1c70e6 100644 --- a/sdk/python/tests/unit/test_on_demand_python_transformation.py +++ b/sdk/python/tests/unit/test_on_demand_python_transformation.py @@ -1,5 +1,7 @@ import os import re +import sqlite3 +import sys import tempfile import unittest from datetime import datetime, timedelta @@ -20,10 +22,12 @@ from feast.feature_view import DUMMY_ENTITY_FIELD from feast.field import Field from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig +from feast.nlp_test_data import create_document_chunks_df from feast.on_demand_feature_view import on_demand_feature_view from feast.types import ( Array, Bool, + Bytes, Float32, Float64, Int64, @@ -161,7 +165,11 @@ def python_demo_view(inputs: dict[str, Any]) -> dict[str, Any]: @on_demand_feature_view( sources=[driver_stats_fv[["conv_rate", "acc_rate"]]], 
schema=[ - Field(name="conv_rate_plus_acc_python_singleton", dtype=Float64) + Field(name="conv_rate_plus_acc_python_singleton", dtype=Float64), + Field( + name="conv_rate_plus_acc_python_singleton_array", + dtype=Array(Float64), + ), ], mode="python", singleton=True, @@ -171,6 +179,7 @@ def python_singleton_view(inputs: dict[str, Any]) -> dict[str, Any]: output["conv_rate_plus_acc_python_singleton"] = ( inputs["conv_rate"] + inputs["acc_rate"] ) + output["conv_rate_plus_acc_python_singleton_array"] = [0.1, 0.2, 0.3] return output @on_demand_feature_view( @@ -852,6 +861,9 @@ def test_stored_writes(self): assert driver_stats_fv.entities == [driver.name] assert driver_stats_fv.entity_columns == [] + ODFV_STRING_CONSTANT = "guaranteed constant" + ODFV_OTHER_STRING_CONSTANT = "somethign else" + @on_demand_feature_view( entities=[driver], sources=[ @@ -863,6 +875,7 @@ def test_stored_writes(self): Field(name="current_datetime", dtype=UnixTimestamp), Field(name="counter", dtype=Int64), Field(name="input_datetime", dtype=UnixTimestamp), + Field(name="string_constant", dtype=String), ], mode="python", write_to_online_store=True, @@ -880,6 +893,7 @@ def python_stored_writes_feature_view( "current_datetime": [datetime.now() for _ in inputs["conv_rate"]], "counter": [c + 1 for c in inputs["counter"]], "input_datetime": [d for d in inputs["input_datetime"]], + "string_constant": [ODFV_STRING_CONSTANT], } return output @@ -933,30 +947,13 @@ def python_stored_writes_feature_view( "created": current_datetime, } ] - odfv_entity_rows_to_write = [ - { - "driver_id": 1001, - "counter": 0, - "input_datetime": current_datetime, - } - ] fv_entity_rows_to_read = [ { "driver_id": 1001, } ] - # Note that here we shouldn't have to pass the request source features for reading - # because they should have already been written to the online store - odfv_entity_rows_to_read = [ - { - "driver_id": 1001, - "conv_rate": 0.25, - "acc_rate": 0.50, - "counter": 0, - "input_datetime": current_datetime, 
- } - ] - print("storing fv features") + print("") + print("storing FV features") self.store.write_to_online_store( feature_view_name="driver_hourly_stats", df=fv_entity_rows_to_write, @@ -978,11 +975,58 @@ def python_stored_writes_feature_view( "acc_rate": [0.25], } - print("storing odfv features") + # Note that here we shouldn't have to pass the request source features for reading + # because they should have already been written to the online store + odfv_entity_rows_to_write = [ + { + "driver_id": 1002, + "counter": 0, + "conv_rate": 0.25, + "acc_rate": 0.50, + "input_datetime": current_datetime, + "string_constant": ODFV_OTHER_STRING_CONSTANT, + } + ] + odfv_entity_rows_to_read = [ + { + "driver_id": 1002, + "conv_rate_plus_acc": 7, # note how this is not the correct value and would be calculate on demand + "conv_rate": 0.25, + "acc_rate": 0.50, + "counter": 0, + "input_datetime": current_datetime, + "string_constant": ODFV_STRING_CONSTANT, + } + ] + print("storing ODFV features") self.store.write_to_online_store( feature_view_name="python_stored_writes_feature_view", df=odfv_entity_rows_to_write, ) + _conn = sqlite3.connect(self.store.config.online_store.path) + _table_name = ( + self.store.project + + "_" + + self.store.get_on_demand_feature_view( + "python_stored_writes_feature_view" + ).name + ) + sample = pd.read_sql( + f""" + select + feature_name, + value + from {_table_name} + """, + _conn, + ) + assert ( + sample[sample["feature_name"] == "string_constant"]["value"] + .astype(str) + .str.contains("guaranteed constant") + .values[0] + ) + print("reading odfv features") online_odfv_python_response = self.store.get_online_features( entity_rows=odfv_entity_rows_to_read, @@ -991,6 +1035,7 @@ def python_stored_writes_feature_view( "python_stored_writes_feature_view:current_datetime", "python_stored_writes_feature_view:counter", "python_stored_writes_feature_view:input_datetime", + "python_stored_writes_feature_view:string_constant", ], ).to_dict() 
print(online_odfv_python_response) @@ -1001,5 +1046,248 @@ def python_stored_writes_feature_view( "counter", "current_datetime", "input_datetime", + "string_constant", + ] + ) + # This should be 1 because we write the value of 0 and during the write, the counter is incremented + assert online_odfv_python_response["counter"] == [1] + assert online_odfv_python_response["string_constant"] == [ + ODFV_STRING_CONSTANT + ] + assert online_odfv_python_response["string_constant"] != [ + ODFV_OTHER_STRING_CONSTANT + ] + + def test_stored_writes_with_explode(self): + with tempfile.TemporaryDirectory() as data_dir: + self.store = FeatureStore( + config=RepoConfig( + project="test_on_demand_python_transformation_explode", + registry=os.path.join(data_dir, "registry.db"), + provider="local", + entity_key_serialization_version=3, + online_store=SqliteOnlineStoreConfig( + path=os.path.join(data_dir, "online.db"), + vector_enabled=True, + vector_len=5, + ), + ) + ) + + documents = { + "doc_1": "Hello world. How are you?", + "doc_2": "This is a test. 
Document chunking example.", + } + start_date = datetime.now() - timedelta(days=15) + end_date = datetime.now() + + documents_df = create_document_chunks_df( + documents, + start_date, + end_date, + embedding_size=60, + ) + corpus_path = os.path.join(data_dir, "documents.parquet") + documents_df.to_parquet(path=corpus_path, allow_truncated_timestamps=True) + + chunk = Entity( + name="chunk", join_keys=["chunk_id"], value_type=ValueType.STRING + ) + document = Entity( + name="document", join_keys=["document_id"], value_type=ValueType.STRING + ) + + input_explode_request_source = RequestSource( + name="counter_source", + schema=[ + Field(name="document_id", dtype=String), + Field(name="document_text", dtype=String), + Field(name="document_bytes", dtype=Bytes), + ], + ) + + @on_demand_feature_view( + entities=[chunk, document], + sources=[ + input_explode_request_source, + ], + schema=[ + Field(name="document_id", dtype=String), + Field(name="chunk_id", dtype=String), + Field(name="chunk_text", dtype=String), + Field( + name="vector", + dtype=Array(Float32), + vector_index=True, + vector_search_metric="L2", + ), + ], + mode="python", + write_to_online_store=True, + ) + def python_stored_writes_feature_view_explode_singleton( + inputs: dict[str, Any], + ): + output: dict[str, Any] = { + "document_id": ["doc_1", "doc_1", "doc_2", "doc_2"], + "chunk_id": ["chunk-1", "chunk-2", "chunk-1", "chunk-2"], + "chunk_text": [ + "hello friends", + "how are you?", + "This is a test.", + "Document chunking example.", + ], + "vector": [ + [0.1] * 5, + [0.2] * 5, + [0.3] * 5, + [0.4] * 5, + ], + } + return output + + assert python_stored_writes_feature_view_explode_singleton.entities == [ + chunk.name, + document.name, + ] + assert ( + python_stored_writes_feature_view_explode_singleton.entity_columns[ + 0 + ].name + == document.join_key + ) + assert ( + python_stored_writes_feature_view_explode_singleton.entity_columns[ + 1 + ].name + == chunk.join_key + ) + + self.store.apply( + [ + 
chunk, + document, + input_explode_request_source, + python_stored_writes_feature_view_explode_singleton, + ] + ) + odfv_applied = self.store.get_on_demand_feature_view( + "python_stored_writes_feature_view_explode_singleton" + ) + + assert odfv_applied.features[1].vector_index + + assert odfv_applied.entities == [chunk.name, document.name] + + # Note here that after apply() is called, the entity_columns are populated with the join_key + assert odfv_applied.entity_columns[1].name == chunk.join_key + assert odfv_applied.entity_columns[0].name == document.join_key + + assert len(self.store.list_all_feature_views()) == 1 + assert len(self.store.list_feature_views()) == 0 + assert len(self.store.list_on_demand_feature_views()) == 1 + assert len(self.store.list_stream_feature_views()) == 0 + assert ( + python_stored_writes_feature_view_explode_singleton.entity_columns + == self.store.get_on_demand_feature_view( + "python_stored_writes_feature_view_explode_singleton" + ).entity_columns + ) + + odfv_entity_rows_to_write = [ + { + "document_id": "document_1", + "document_text": "Hello world. How are you?", + }, + { + "document_id": "document_2", + "document_text": "This is a test. 
Document chunking example.", + }, + ] + fv_entity_rows_to_read = [ + { + "document_id": "doc_1", + "chunk_id": "chunk-2", + }, + { + "document_id": "doc_2", + "chunk_id": "chunk-1", + }, + ] + + self.store.write_to_online_store( + feature_view_name="python_stored_writes_feature_view_explode_singleton", + df=odfv_entity_rows_to_write, + ) + _table_name = ( + self.store.project + + "_" + + self.store.get_on_demand_feature_view( + "python_stored_writes_feature_view_explode_singleton" + ).name + ) + _conn = sqlite3.connect(self.store.config.online_store.path) + sample = pd.read_sql( + f""" + select + entity_key, + feature_name, + value + from {_table_name} + """, + _conn, + ) + print(f"\nsample from {_table_name}:\n{sample}") + + # verifying we retrieve doc_1 chunk-2 + filt = (sample["feature_name"] == "chunk_text") & ( + sample["value"] + .apply(lambda x: x.decode("latin1")) + .str.contains("how are") + ) + assert ( + sample[filt]["entity_key"].astype(str).str.contains("doc_1") + & sample[filt]["entity_key"].astype(str).str.contains("chunk-2") + ).values[0] + + print("reading fv features") + online_python_response = self.store.get_online_features( + entity_rows=fv_entity_rows_to_read, + features=[ + "python_stored_writes_feature_view_explode_singleton:document_id", + "python_stored_writes_feature_view_explode_singleton:chunk_id", + "python_stored_writes_feature_view_explode_singleton:chunk_text", + ], + ).to_dict() + assert sorted(list(online_python_response.keys())) == sorted( + [ + "chunk_id", + "chunk_text", + "document_id", ] ) + assert online_python_response == { + "document_id": ["doc_1", "doc_2"], + "chunk_id": ["chunk-2", "chunk-1"], + "chunk_text": ["how are you?", "This is a test."], + } + + if sys.version_info[0:2] == (3, 10): + query_embedding = [0.05] * 5 + online_python_vec_response = self.store.retrieve_online_documents_v2( + features=[ + "python_stored_writes_feature_view_explode_singleton:document_id", + 
"python_stored_writes_feature_view_explode_singleton:chunk_id", + "python_stored_writes_feature_view_explode_singleton:chunk_text", + ], + query=query_embedding, + top_k=2, + ).to_dict() + + assert online_python_vec_response is not None + assert online_python_vec_response == { + "document_id": ["doc_1", "doc_1"], + "chunk_id": ["chunk-1", "chunk-2"], + "chunk_text": ["hello friends", "how are you?"], + "distance": [0.11180340498685837, 0.3354102075099945], + } diff --git a/sdk/python/tests/unit/test_repo_operations_validate_feast_project_name.py b/sdk/python/tests/unit/test_repo_operations_validate_feast_project_name.py index 0dc4b2651b0..33d1d5307d6 100644 --- a/sdk/python/tests/unit/test_repo_operations_validate_feast_project_name.py +++ b/sdk/python/tests/unit/test_repo_operations_validate_feast_project_name.py @@ -21,6 +21,6 @@ def test_is_valid_name(): ] for name, expected in test_cases: - assert ( - is_valid_name(name) == expected - ), f"Failed for project invalid name: {name}" + assert is_valid_name(name) == expected, ( + f"Failed for project invalid name: {name}" + ) diff --git a/sdk/python/tests/unit/test_stream_feature_view.py b/sdk/python/tests/unit/test_stream_feature_view.py index 4f93691028e..96e62d9d9e2 100644 --- a/sdk/python/tests/unit/test_stream_feature_view.py +++ b/sdk/python/tests/unit/test_stream_feature_view.py @@ -4,7 +4,6 @@ import pytest from feast.aggregation import Aggregation -from feast.batch_feature_view import BatchFeatureView from feast.data_format import AvroFormat from feast.data_source import KafkaSource, PushSource from feast.entity import Entity @@ -18,37 +17,6 @@ from feast.utils import _utc_now, make_tzaware -def test_create_batch_feature_view(): - batch_source = FileSource(path="some path") - BatchFeatureView( - name="test batch feature view", - entities=[], - ttl=timedelta(days=30), - source=batch_source, - ) - - with pytest.raises(TypeError): - BatchFeatureView( - name="test batch feature view", entities=[], 
ttl=timedelta(days=30) - ) - - stream_source = KafkaSource( - name="kafka", - timestamp_field="event_timestamp", - kafka_bootstrap_servers="", - message_format=AvroFormat(""), - topic="topic", - batch_source=FileSource(path="some path"), - ) - with pytest.raises(ValueError): - BatchFeatureView( - name="test batch feature view", - entities=[], - ttl=timedelta(days=30), - source=stream_source, - ) - - def test_create_stream_feature_view(): stream_source = KafkaSource( name="kafka", @@ -64,6 +32,7 @@ def test_create_stream_feature_view(): ttl=timedelta(days=30), source=stream_source, aggregations=[], + udf=lambda x: x, ) push_source = PushSource( @@ -75,6 +44,7 @@ def test_create_stream_feature_view(): ttl=timedelta(days=30), source=push_source, aggregations=[], + udf=lambda x: x, ) with pytest.raises(TypeError): @@ -92,6 +62,7 @@ def test_create_stream_feature_view(): ttl=timedelta(days=30), source=FileSource(path="some path"), aggregations=[], + udf=lambda x: x, ) @@ -173,7 +144,7 @@ def pandas_udf(pandas_df): import pandas as pd assert type(pandas_df) == pd.DataFrame - df = pandas_df.transform(lambda x: x + 10, axis=1) + df = pandas_df.transform(lambda x: x + 10) return df import pandas as pd @@ -230,6 +201,7 @@ def test_stream_feature_view_proto_type(): ttl=timedelta(days=30), source=stream_source, aggregations=[], + udf=lambda x: x, ) assert sfv.proto_class is StreamFeatureViewProto @@ -249,6 +221,7 @@ def test_stream_feature_view_copy(): ttl=timedelta(days=30), source=stream_source, aggregations=[], + udf=lambda x: x, ) assert sfv == copy.copy(sfv) diff --git a/sdk/python/tests/unit/test_unit_feature_store.py b/sdk/python/tests/unit/test_unit_feature_store.py index 19a133564f2..3bad0ec6c59 100644 --- a/sdk/python/tests/unit/test_unit_feature_store.py +++ b/sdk/python/tests/unit/test_unit_feature_store.py @@ -1,6 +1,8 @@ from dataclasses import dataclass from typing import Dict, List +import pytest + from feast import utils from feast.protos.feast.types.Value_pb2 
import Value @@ -17,7 +19,7 @@ class MockFeatureView: projection: MockFeatureViewProjection -def test_get_unique_entities(): +def test_get_unique_entities_success(): entity_values = { "entity_1": [Value(int64_val=1), Value(int64_val=2), Value(int64_val=1)], "entity_2": [ @@ -36,14 +38,94 @@ def test_get_unique_entities(): projection=MockFeatureViewProjection(join_key_map={}), ) - unique_entities, indexes = utils._get_unique_entities( + unique_entities, indexes, output_len = utils._get_unique_entities( table=fv, join_key_values=entity_values, entity_name_to_join_key_map=entity_name_to_join_key_map, ) - - assert unique_entities == ( + expected_entities = ( {"entity_1": Value(int64_val=1), "entity_2": Value(string_val="1")}, {"entity_1": Value(int64_val=2), "entity_2": Value(string_val="2")}, ) - assert indexes == ([0, 2], [1]) + expected_indexes = ([0, 2], [1]) + + assert unique_entities == expected_entities + assert indexes == expected_indexes + assert output_len == 3 + + +def test_get_unique_entities_missing_join_key_success(): + """ + Tests that _get_unique_entities succeeds when a required join key is missing, silently omitting that key from the results. 
+ """ + # Here, we omit the required key for "entity_1" + entity_values = { + "entity_2": [ + Value(string_val="1"), + Value(string_val="2"), + Value(string_val="1"), + ], + } + + entity_name_to_join_key_map = {"entity_1": "entity_1", "entity_2": "entity_2"} + + fv = MockFeatureView( + name="fv_1", + entities=["entity_1", "entity_2"], + projection=MockFeatureViewProjection(join_key_map={}), + ) + + unique_entities, indexes, output_len = utils._get_unique_entities( + table=fv, + join_key_values=entity_values, + entity_name_to_join_key_map=entity_name_to_join_key_map, + ) + expected_entities = ( + {"entity_2": Value(string_val="1")}, + {"entity_2": Value(string_val="2")}, + ) + expected_indexes = ([0, 2], [1]) + + assert unique_entities == expected_entities + assert indexes == expected_indexes + assert output_len == 3 + # We're not say anything about the entity_1 missing from the unique_entities list + assert "entity_1" not in [entity.keys() for entity in unique_entities] + + +def test_get_unique_entities_missing_all_join_keys_error(): + """ + Tests that _get_unique_entities raises a KeyError when all required join keys are missing. 
+ """ + entity_values_not_in_feature_view = { + "entity_3": [Value(string_val="3")], + } + entity_name_to_join_key_map = { + "entity_1": "entity_1", + "entity_2": "entity_2", + "entity_3": "entity_3", + } + + fv = MockFeatureView( + name="fv_1", + entities=["entity_1", "entity_2"], + projection=MockFeatureViewProjection(join_key_map={}), + ) + + with pytest.raises(KeyError) as excinfo: + utils._get_unique_entities( + table=fv, + join_key_values=entity_values_not_in_feature_view, + entity_name_to_join_key_map=entity_name_to_join_key_map, + ) + + error_message = str(excinfo.value) + assert ( + "Missing join key values for keys: ['entity_1', 'entity_2', 'entity_3']" + in error_message + ) + assert ( + "No values provided for keys: ['entity_1', 'entity_2', 'entity_3']" + in error_message + ) + assert "Provided join_key_values: ['entity_3']" in error_message diff --git a/sdk/python/tests/utils/auth_permissions_util.py b/sdk/python/tests/utils/auth_permissions_util.py index 6f0a3c8eeac..dcc456e1d82 100644 --- a/sdk/python/tests/utils/auth_permissions_util.py +++ b/sdk/python/tests/utils/auth_permissions_util.py @@ -60,6 +60,7 @@ def start_feature_server( metrics: bool = False, tls_key_path: str = "", tls_cert_path: str = "", + ca_trust_store_path: str = "", ): host = "0.0.0.0" cmd = [ @@ -100,9 +101,9 @@ def start_feature_server( timeout_msg="Unable to start the Prometheus server in 60 seconds.", ) else: - assert not check_port_open( - "localhost", 8000 - ), "Prometheus server is running when it should be disabled." + assert not check_port_open("localhost", 8000), ( + "Prometheus server is running when it should be disabled." 
+ ) online_server_url = ( f"https://localhost:{server_port}" @@ -127,18 +128,30 @@ def start_feature_server( def get_remote_registry_store(server_port, feature_store, tls_mode): - is_tls_mode, _, tls_cert_path = tls_mode + is_tls_mode, _, tls_cert_path, ca_trust_store_path = tls_mode if is_tls_mode: - registry_config = RemoteRegistryConfig( - registry_type="remote", - path=f"localhost:{server_port}", - cert=tls_cert_path, - ) + if ca_trust_store_path: + registry_config = RemoteRegistryConfig( + registry_type="remote", + path=f"localhost:{server_port}", + is_tls=True, + ) + else: + registry_config = RemoteRegistryConfig( + registry_type="remote", + path=f"localhost:{server_port}", + is_tls=True, + cert=tls_cert_path, + ) else: registry_config = RemoteRegistryConfig( registry_type="remote", path=f"localhost:{server_port}" ) + if is_tls_mode and ca_trust_store_path: + # configure trust store path only when is_tls_mode and ca_trust_store_path exists. + os.environ["FEAST_CA_CERT_FILE_PATH"] = ca_trust_store_path + store = FeatureStore( config=RepoConfig( project=PROJECT_NAME, diff --git a/sdk/python/tests/utils/cli_repo_creator.py b/sdk/python/tests/utils/cli_repo_creator.py index 92b6dd992aa..46df563eafb 100644 --- a/sdk/python/tests/utils/cli_repo_creator.py +++ b/sdk/python/tests/utils/cli_repo_creator.py @@ -51,7 +51,14 @@ def run_with_output(self, args: List[str], cwd: Path) -> Tuple[int, bytes]: return e.returncode, e.output @contextmanager - def local_repo(self, example_repo_py: str, offline_store: str): + def local_repo( + self, + example_repo_py: str, + offline_store: str, + online_store: str = "sqlite", + apply=True, + teardown=True, + ): """ Convenience method to set up all the boilerplate for a local feature repo. 
""" @@ -59,46 +66,69 @@ def local_repo(self, example_repo_py: str, offline_store: str): random.choice(string.ascii_lowercase + string.digits) for _ in range(10) ) - with tempfile.TemporaryDirectory() as repo_dir_name, tempfile.TemporaryDirectory() as data_dir_name: + with ( + tempfile.TemporaryDirectory() as repo_dir_name, + tempfile.TemporaryDirectory() as data_dir_name, + ): repo_path = Path(repo_dir_name) data_path = Path(data_dir_name) repo_config = repo_path / "feature_store.yaml" - - repo_config.write_text( - dedent( + if online_store == "sqlite": + yaml_config = dedent( f""" - project: {project_id} - registry: {data_path / "registry.db"} - provider: local - online_store: - path: {data_path / "online_store.db"} - offline_store: - type: {offline_store} - entity_key_serialization_version: 2 - """ + project: {project_id} + registry: {data_path / "registry.db"} + provider: local + online_store: + path: {data_path / "online_store.db"} + offline_store: + type: {offline_store} + entity_key_serialization_version: 2 + """ ) - ) + elif online_store == "milvus": + yaml_config = dedent( + f""" + project: {project_id} + registry: {data_path / "registry.db"} + provider: local + online_store: + path: {data_path / "online_store.db"} + type: milvus + vector_enabled: true + embedding_dim: 10 + offline_store: + type: {offline_store} + entity_key_serialization_version: 3 + """ + ) + else: + pass + + repo_config.write_text(yaml_config) repo_example = repo_path / "example.py" repo_example.write_text(example_repo_py) - result = self.run(["apply"], cwd=repo_path) - stdout = result.stdout.decode("utf-8") - stderr = result.stderr.decode("utf-8") - print(f"Apply stdout:\n{stdout}") - print(f"Apply stderr:\n{stderr}") - assert ( - result.returncode == 0 - ), f"stdout: {result.stdout}\nstderr: {result.stderr}" + if apply: + result = self.run(["apply"], cwd=repo_path) + stdout = result.stdout.decode("utf-8") + stderr = result.stderr.decode("utf-8") + print(f"Apply stdout:\n{stdout}") + 
print(f"Apply stderr:\n{stderr}") + assert result.returncode == 0, ( + f"stdout: {result.stdout}\nstderr: {result.stderr}" + ) yield FeatureStore(repo_path=str(repo_path), config=None) - result = self.run(["teardown"], cwd=repo_path) - stdout = result.stdout.decode("utf-8") - stderr = result.stderr.decode("utf-8") - print(f"Apply stdout:\n{stdout}") - print(f"Apply stderr:\n{stderr}") - assert ( - result.returncode == 0 - ), f"stdout: {result.stdout}\nstderr: {result.stderr}" + if teardown: + result = self.run(["teardown"], cwd=repo_path) + stdout = result.stdout.decode("utf-8") + stderr = result.stderr.decode("utf-8") + print(f"Apply stdout:\n{stdout}") + print(f"Apply stderr:\n{stderr}") + assert result.returncode == 0, ( + f"stdout: {result.stdout}\nstderr: {result.stderr}" + ) diff --git a/sdk/python/tests/utils/e2e_test_validation.py b/sdk/python/tests/utils/e2e_test_validation.py index a08e8fef429..ed66aead87d 100644 --- a/sdk/python/tests/utils/e2e_test_validation.py +++ b/sdk/python/tests/utils/e2e_test_validation.py @@ -131,17 +131,17 @@ def _check_offline_and_online_features( if full_feature_names: if expected_value: assert response_dict[f"{fv.name}__value"][0], f"Response: {response_dict}" - assert ( - abs(response_dict[f"{fv.name}__value"][0] - expected_value) < 1e-6 - ), f"Response: {response_dict}, Expected: {expected_value}" + assert abs(response_dict[f"{fv.name}__value"][0] - expected_value) < 1e-6, ( + f"Response: {response_dict}, Expected: {expected_value}" + ) else: assert response_dict[f"{fv.name}__value"][0] is None else: if expected_value: assert response_dict["value"][0], f"Response: {response_dict}" - assert ( - abs(response_dict["value"][0] - expected_value) < 1e-6 - ), f"Response: {response_dict}, Expected: {expected_value}" + assert abs(response_dict["value"][0] - expected_value) < 1e-6, ( + f"Response: {response_dict}, Expected: {expected_value}" + ) else: assert response_dict["value"][0] is None diff --git 
a/sdk/python/tests/utils/generate_self_signed_certifcate_util.py b/sdk/python/tests/utils/generate_self_signed_certifcate_util.py deleted file mode 100644 index 1b0b212818c..00000000000 --- a/sdk/python/tests/utils/generate_self_signed_certifcate_util.py +++ /dev/null @@ -1,73 +0,0 @@ -import logging -from datetime import datetime, timedelta - -from cryptography import x509 -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.x509.oid import NameOID - -logger = logging.getLogger(__name__) - - -def generate_self_signed_cert( - cert_path="cert.pem", key_path="key.pem", common_name="localhost" -): - """ - Generate a self-signed certificate and save it to the specified paths. - - :param cert_path: Path to save the certificate (PEM format) - :param key_path: Path to save the private key (PEM format) - :param common_name: Common name (CN) for the certificate, defaults to 'localhost' - """ - # Generate private key - key = rsa.generate_private_key( - public_exponent=65537, key_size=2048, backend=default_backend() - ) - - # Create a self-signed certificate - subject = issuer = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, "US"), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "California"), - x509.NameAttribute(NameOID.LOCALITY_NAME, "San Francisco"), - x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Feast"), - x509.NameAttribute(NameOID.COMMON_NAME, common_name), - ] - ) - - certificate = ( - x509.CertificateBuilder() - .subject_name(subject) - .issuer_name(issuer) - .public_key(key.public_key()) - .serial_number(x509.random_serial_number()) - .not_valid_before(datetime.utcnow()) - .not_valid_after( - # Certificate valid for 1 year - datetime.utcnow() + timedelta(days=365) - ) - .add_extension( - x509.SubjectAlternativeName([x509.DNSName(common_name)]), - critical=False, - ) - .sign(key, hashes.SHA256(), 
default_backend()) - ) - - # Write the private key to a file - with open(key_path, "wb") as f: - f.write( - key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption(), - ) - ) - - # Write the certificate to a file - with open(cert_path, "wb") as f: - f.write(certificate.public_bytes(serialization.Encoding.PEM)) - - logger.info( - f"Self-signed certificate and private key have been generated at {cert_path} and {key_path}." - ) diff --git a/sdk/python/tests/utils/ssl_certifcates_util.py b/sdk/python/tests/utils/ssl_certifcates_util.py new file mode 100644 index 00000000000..53a56e04f3d --- /dev/null +++ b/sdk/python/tests/utils/ssl_certifcates_util.py @@ -0,0 +1,174 @@ +import ipaddress +import logging +import os +import shutil +from datetime import datetime, timedelta + +import certifi +from cryptography import x509 +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.x509 import load_pem_x509_certificate +from cryptography.x509.oid import NameOID + +logger = logging.getLogger(__name__) + + +def generate_self_signed_cert( + cert_path="cert.pem", key_path="key.pem", common_name="localhost" +): + """ + Generate a self-signed certificate and save it to the specified paths. 
+ + :param cert_path: Path to save the certificate (PEM format) + :param key_path: Path to save the private key (PEM format) + :param common_name: Common name (CN) for the certificate, defaults to 'localhost' + """ + # Generate private key + key = rsa.generate_private_key( + public_exponent=65537, key_size=2048, backend=default_backend() + ) + + # Create a self-signed certificate + subject = issuer = x509.Name( + [ + x509.NameAttribute(NameOID.COUNTRY_NAME, "US"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "California"), + x509.NameAttribute(NameOID.LOCALITY_NAME, "San Francisco"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Feast"), + x509.NameAttribute(NameOID.COMMON_NAME, common_name), + ] + ) + + # Define the certificate's Subject Alternative Names (SANs) + alt_names = [ + x509.DNSName("localhost"), # Hostname + x509.IPAddress(ipaddress.IPv4Address("127.0.0.1")), # Localhost IP + x509.IPAddress(ipaddress.IPv4Address("0.0.0.0")), # Bind-all IP (optional) + ] + san = x509.SubjectAlternativeName(alt_names) + + certificate = ( + x509.CertificateBuilder() + .subject_name(subject) + .issuer_name(issuer) + .public_key(key.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.utcnow()) + .not_valid_after( + # Certificate valid for 1 year + datetime.utcnow() + timedelta(days=365) + ) + .add_extension(san, critical=False) + .sign(key, hashes.SHA256(), default_backend()) + ) + + # Write the private key to a file + with open(key_path, "wb") as f: + f.write( + key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + ) + + # Write the certificate to a file + with open(cert_path, "wb") as f: + f.write(certificate.public_bytes(serialization.Encoding.PEM)) + + logger.info( + f"Self-signed certificate and private key have been generated at {cert_path} and {key_path}." 
+ ) + + +def create_ca_trust_store( + public_key_path: str, private_key_path: str, output_trust_store_path: str +): + """ + Create a new CA trust store as a copy of the existing one (if available), + and add the provided public certificate to it. + + :param public_key_path: Path to the public certificate (e.g., PEM file). + :param private_key_path: Path to the private key (optional, to verify signing authority). + :param output_trust_store_path: Path to save the new trust store. + """ + try: + # Step 1: Identify the existing trust store (if available via environment variables) + existing_trust_store = os.environ.get("SSL_CERT_FILE") or os.environ.get( + "REQUESTS_CA_BUNDLE" + ) + + # Step 2: Copy the existing trust store to the new location (if it exists) + if existing_trust_store and os.path.exists(existing_trust_store): + shutil.copy(existing_trust_store, output_trust_store_path) + logger.info( + f"Copied existing trust store from {existing_trust_store} to {output_trust_store_path}" + ) + else: + # Log the creation of a new trust store (without opening a file unnecessarily) + logger.info( + f"No existing trust store found. Creating a new trust store at {output_trust_store_path}" + ) + + # Step 3: Load and validate the public certificate + with open(public_key_path, "rb") as pub_file: + public_cert_data = pub_file.read() + public_cert = load_pem_x509_certificate( + public_cert_data, backend=default_backend() + ) + + # Verify the private key matches (optional, adds validation) + if private_key_path: + with open(private_key_path, "rb") as priv_file: + private_key_data = priv_file.read() + private_key = serialization.load_pem_private_key( + private_key_data, password=None, backend=default_backend() + ) + # Check the public/private key match + if ( + private_key.public_key().public_numbers() + != public_cert.public_key().public_numbers() + ): + raise ValueError( + "Public certificate does not match the private key." 
+ ) + + # Step 4: Add the public certificate to the new trust store + with open(output_trust_store_path, "ab") as trust_store_file: + trust_store_file.write(public_cert.public_bytes(serialization.Encoding.PEM)) + + logger.info( + f"Trust store created/updated successfully at: {output_trust_store_path}" + ) + + except Exception as e: + logger.error(f"Error creating CA trust store: {e}") + + +def combine_trust_stores(custom_cert_path: str, output_combined_path: str): + """ + Combine the default certifi CA bundle with a custom certificate file. + + :param custom_cert_path: Path to the custom certificate PEM file. + :param output_combined_path: Path where the combined CA bundle will be saved. + """ + try: + # Get the default certifi CA bundle + certifi_ca_bundle = certifi.where() + + with open(output_combined_path, "wb") as combined_file: + # Write the default CA bundle + with open(certifi_ca_bundle, "rb") as default_file: + combined_file.write(default_file.read()) + + # Append the custom certificates + with open(custom_cert_path, "rb") as custom_file: + combined_file.write(custom_file.read()) + + logger.info(f"Combined trust store created at: {output_combined_path}") + + except Exception as e: + logger.error(f"Error combining trust stores: {e}") + raise e diff --git a/setup.py b/setup.py index 5a6581cc853..91af19d6a0f 100644 --- a/setup.py +++ b/setup.py @@ -11,12 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import glob import os import pathlib import re import shutil +import subprocess +from subprocess import CalledProcessError +import sys +from pathlib import Path -from setuptools import find_packages, setup +from setuptools import find_packages, setup, Command NAME = "feast" DESCRIPTION = "Python SDK for Feast" @@ -28,13 +33,13 @@ "click>=7.0.0,<9.0.0", "colorama>=0.3.9,<1", "dill~=0.3.0", - "protobuf>=4.24.0,<5.0.0", + "protobuf>=4.24.0", "Jinja2>=2,<4", "jsonschema", "mmh3", "numpy>=1.22,<2", "pandas>=1.4.3,<3", - "pyarrow>=9.0.0", + "pyarrow<18.1.0", "pydantic>=2.0.0", "pygments>=2.12.0,<3", "PyYAML>=5.4.0,<7", @@ -85,7 +90,7 @@ ] SQLITE_VEC_REQUIRED = [ - "sqlite-vec==v0.1.1", + "sqlite-vec==v0.1.6", ] TRINO_REQUIRED = ["trino>=0.305.0,<0.400.0", "regex"] @@ -105,7 +110,7 @@ "cassandra-driver>=3.24.0,<4", ] -GE_REQUIRED = ["great_expectations>=0.15.41"] +GE_REQUIRED = ["great_expectations>=0.15.41,<1"] AZURE_REQUIRED = [ "azure-storage-blob>=0.37.0", @@ -138,30 +143,43 @@ DELTA_REQUIRED = ["deltalake"] +DOCLING_REQUIRED = ["docling>=2.23.0"] + ELASTICSEARCH_REQUIRED = ["elasticsearch>=8.13.0"] -SINGLESTORE_REQUIRED = ["singlestoredb"] +SINGLESTORE_REQUIRED = ["singlestoredb<1.8.0"] -COUCHBASE_REQUIRED = ["couchbase==4.3.2"] +COUCHBASE_REQUIRED = [ + "couchbase==4.3.2", + "couchbase-columnar==1.0.0" +] MSSQL_REQUIRED = ["ibis-framework[mssql]>=9.0.0,<10"] FAISS_REQUIRED = ["faiss-cpu>=1.7.0,<2"] - QDRANT_REQUIRED = ["qdrant-client>=1.12.0"] +GO_REQUIRED = ["cffi>=1.15.0"] + +MILVUS_REQUIRED = ["pymilvus"] + +TORCH_REQUIRED = [ + "torch>=2.2.2", + "torchvision>=0.17.2", +] + CI_REQUIRED = ( [ "build", "virtualenv==20.23.0", - "cryptography>=35.0,<43", - "ruff>=0.3.3", + "cryptography>=43.0,<44", + "ruff>=0.8.0", "mypy-protobuf>=3.1", "grpcio-tools>=1.56.2,<2", "grpcio-testing>=1.56.2,<2", # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656). 
- "httpx>=0.23.3", - "minio==7.1.0", + "httpx==0.27.2", + "minio==7.2.11", "mock==2.0.0", "moto<5", "mypy>=1.4.1,<1.11.3", @@ -179,7 +197,7 @@ "pytest-mock==1.10.4", "pytest-env", "Sphinx>4.0.0,<7", - "testcontainers==4.4.0", + "testcontainers==4.8.2", "python-keycloak==4.2.2", "pre-commit<3.3.2", "assertpy==1.1", @@ -220,8 +238,15 @@ + OPENTELEMETRY + FAISS_REQUIRED + QDRANT_REQUIRED + + MILVUS_REQUIRED + + DOCLING_REQUIRED + + TORCH_REQUIRED +) +NLP_REQUIRED = ( + DOCLING_REQUIRED + + MILVUS_REQUIRED + + TORCH_REQUIRED ) - DOCS_REQUIRED = CI_REQUIRED DEV_REQUIRED = CI_REQUIRED @@ -248,6 +273,7 @@ PYTHON_CODE_PREFIX = "sdk/python" + setup( name=NAME, author=AUTHOR, @@ -291,7 +317,12 @@ "couchbase": COUCHBASE_REQUIRED, "opentelemetry": OPENTELEMETRY, "faiss": FAISS_REQUIRED, - "qdrant": QDRANT_REQUIRED + "qdrant": QDRANT_REQUIRED, + "go": GO_REQUIRED, + "milvus": MILVUS_REQUIRED, + "docling": DOCLING_REQUIRED, + "pytorch": TORCH_REQUIRED, + "nlp": NLP_REQUIRED, }, include_package_data=True, license="Apache", diff --git a/ui/.nvmrc b/ui/.nvmrc index 67e145bf0f9..cc7ce7f49fe 100644 --- a/ui/.nvmrc +++ b/ui/.nvmrc @@ -1 +1 @@ -v20.18.0 +v22.13.1 diff --git a/ui/README.md b/ui/README.md index a2326e1a9ef..bf9ccd367d9 100644 --- a/ui/README.md +++ b/ui/README.md @@ -77,7 +77,7 @@ The advantage of importing Feast UI as a module is in the ease of customization. ##### Fetching the Project List -You can use `projectListPromise` to provide a promise that overrides where the Feast UI fetches the project list from. +By default, the Feast UI fetches the project list from the app root path. You can use `projectListPromise` to provide a promise that overrides where it's fetched from. 
```jsx /src"], collectCoverageFrom: ["src/**/*.{js,jsx,ts,tsx}", "!src/**/*.d.ts"], @@ -7,7 +9,13 @@ module.exports = { "/src/**/__tests__/**/*.{js,jsx,ts,tsx}", "/src/**/*.{spec,test}.{js,jsx,ts,tsx}", ], - testEnvironment: "jsdom", + // Couldn't get tests working with msw 2 in jsdom or jest-fixed-jsdom, + // happy-dom finally worked with added globals + testEnvironment: "@happy-dom/jest-environment", + // https://mswjs.io/docs/migrations/1.x-to-2.x#cannot-find-module-mswnode-jsdom + testEnvironmentOptions: { + customExportConditions: [''], + }, transform: { "^.+\\.(js|jsx|mjs|cjs|ts|tsx)$": "/config/jest/babelTransform.js", "^.+\\.css$": "/config/jest/cssTransform.js", @@ -15,7 +23,7 @@ module.exports = { "/config/jest/fileTransform.js", }, transformIgnorePatterns: [ - "[/\\\\]node_modules[/\\\\].+\\.(js|jsx|mjs|cjs|ts|tsx)$", + `[/\\\\]node_modules[/\\\\](?!(${transformNodeModules.map(name => name.replaceAll('/', '[/\\\\]')).join('|')})[/\\\\])`, "^.+\\.module\\.(css|sass|scss)$", ], modulePaths: [], diff --git a/ui/package.json b/ui/package.json index 7f583e29153..f2ac73ef595 100644 --- a/ui/package.json +++ b/ui/package.json @@ -1,6 +1,6 @@ { "name": "@feast-dev/feast-ui", - "version": "0.41.0", + "version": "0.46.0", "private": false, "files": [ "dist" @@ -28,15 +28,16 @@ "@emotion/css": "^11.13.0", "@emotion/react": "^11.13.3", "inter-ui": "^3.19.3", + "long": "^5.2.3", "moment": "^2.29.1", "protobufjs": "^7.1.1", "query-string": "^7.1.1", "react-app-polyfill": "^3.0.0", "react-code-blocks": "^0.1.6", "react-query": "^3.39.3", - "react-router-dom": "<6.4.0", + "react-router-dom": "^6.28.0", "tslib": "^2.3.1", - "use-query-params": "^1.2.3", + "use-query-params": "^2.2.1", "zod": "^3.11.6" }, "scripts": { @@ -55,6 +56,7 @@ "@babel/plugin-proposal-private-property-in-object": "^7.21.11", "@babel/preset-env": "^7.25.8", "@babel/preset-react": "^7.25.7", + "@happy-dom/jest-environment": "^16.7.3", "@pmmmwh/react-refresh-webpack-plugin": "^0.5.3", 
"@rollup/plugin-babel": "^5.3.1", "@rollup/plugin-commonjs": "^21.0.2", @@ -67,7 +69,7 @@ "@testing-library/react": "^16.0.1", "@testing-library/user-event": "^14.5.2", "@types/jest": "^27.0.1", - "@types/node": "^20.16.13", + "@types/node": "^22.12.0", "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "babel-jest": "^27.4.2", @@ -89,11 +91,12 @@ "fs-extra": "^10.0.0", "html-webpack-plugin": "^5.5.0", "identity-obj-proxy": "^3.0.0", - "jest": "^27.4.3", - "jest-resolve": "^27.4.2", - "jest-watch-typeahead": "^1.0.0", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-watch-typeahead": "^2.2.2", "mini-css-extract-plugin": "^2.4.5", - "msw": "^0.36.8", + "msw": "^2.7.0", "postcss": "^8.4.4", "postcss-flexbugs-fixes": "^5.0.2", "postcss-loader": "^6.2.1", @@ -119,12 +122,15 @@ "source-map-loader": "^3.0.0", "style-loader": "^3.3.1", "terser-webpack-plugin": "^5.2.5", - "typescript": "^4.9.5", + "typescript": "~5.7.2", "webpack": "^5.64.4", "webpack-dev-server": "^4.6.0", "webpack-manifest-plugin": "^4.0.2", "workbox-webpack-plugin": "^6.4.1" }, + "resolutions": { + "nwsapi": "2.2.13" + }, "description": "Web UI for the [Feast Feature Store](https://feast.dev/)", "repository": { "type": "git", diff --git a/ui/public/registry.db b/ui/public/registry.db index 617771999c7..ae9a05a4a97 100644 Binary files a/ui/public/registry.db and b/ui/public/registry.db differ diff --git a/ui/src/FeastUI.tsx b/ui/src/FeastUI.tsx index 628b916f2d8..067296316ec 100644 --- a/ui/src/FeastUI.tsx +++ b/ui/src/FeastUI.tsx @@ -3,7 +3,7 @@ import React from "react"; import { BrowserRouter } from "react-router-dom"; import { QueryClient, QueryClientProvider } from "react-query"; import { QueryParamProvider } from "use-query-params"; -import RouteAdapter from "./hacks/RouteAdapter"; +import { ReactRouter6Adapter } from 'use-query-params/adapters/react-router-6'; import FeastUISansProviders, { FeastUIConfigs } from "./FeastUISansProviders"; 
interface FeastUIProps { @@ -15,14 +15,17 @@ const defaultQueryClient = new QueryClient(); const FeastUI = ({ reactQueryClient, feastUIConfigs }: FeastUIProps) => { const queryClient = reactQueryClient || defaultQueryClient; + const basename = process.env.PUBLIC_URL ?? ''; return ( - + // Disable v7_relativeSplatPath: custom tab routes don't currently work with it + - - + + diff --git a/ui/src/FeastUISansProviders.test.tsx b/ui/src/FeastUISansProviders.test.tsx index 4af9490e10b..94bd2dfbe35 100644 --- a/ui/src/FeastUISansProviders.test.tsx +++ b/ui/src/FeastUISansProviders.test.tsx @@ -55,7 +55,7 @@ test("full app rendering", async () => { expect(screen.getByText(/Explore this Project/i)).toBeInTheDocument(); const projectNameRegExp = new RegExp( - parsedRegistry.projectMetadata[0].project!, + parsedRegistry.projects[0].spec?.name!, "i" ); @@ -89,7 +89,7 @@ test("routes are reachable", async () => { const routeRegExp = new RegExp(routeName, "i"); - await user.click(screen.getByRole("button", { name: routeRegExp })); + await user.click(screen.getByRole("link", { name: routeRegExp })); // Should land on a page with the heading screen.getByRole("heading", { @@ -112,7 +112,7 @@ test("features are reachable", async () => { await screen.findByText(/Explore this Project/i); const routeRegExp = new RegExp("Feature Views", "i"); - await user.click(screen.getByRole("button", { name: routeRegExp })); + await user.click(screen.getByRole("link", { name: routeRegExp })); screen.getByRole("heading", { name: "Feature Views", diff --git a/ui/src/FeastUISansProviders.tsx b/ui/src/FeastUISansProviders.tsx index 8a12abdc39f..52676c5d0b5 100644 --- a/ui/src/FeastUISansProviders.tsx +++ b/ui/src/FeastUISansProviders.tsx @@ -40,8 +40,8 @@ interface FeastUIConfigs { projectListPromise?: Promise; } -const defaultProjectListPromise = () => { - return fetch("/projects-list.json", { +const defaultProjectListPromise = (basename: string) => { + return fetch(`${basename}/projects-list.json`, { 
headers: { "Content-Type": "application/json", }, @@ -51,8 +51,10 @@ const defaultProjectListPromise = () => { }; const FeastUISansProviders = ({ + basename = "", feastUIConfigs, }: { + basename?: string; feastUIConfigs?: FeastUIConfigs; }) => { const projectListContext: ProjectsListContextInterface = @@ -61,9 +63,7 @@ const FeastUISansProviders = ({ projectsListPromise: feastUIConfigs?.projectListPromise, isCustom: true, } - : { projectsListPromise: defaultProjectListPromise(), isCustom: false }; - - const BASE_URL = process.env.PUBLIC_URL || "" + : { projectsListPromise: defaultProjectListPromise(basename), isCustom: false }; return ( @@ -76,9 +76,9 @@ const FeastUISansProviders = ({ > - }> + }> } /> - }> + }> } /> } /> - !!(event.metaKey || event.altKey || event.ctrlKey || event.shiftKey); - -const isLeftClickEvent = (event) => event.button === 0; - -const isTargetBlank = (event) => { - const target = event.target.getAttribute("target"); - return target && target !== "_self"; -}; - -export default function EuiCustomLink({ to, ...rest }) { - // This is the key! - const navigate = useNavigate(); - - function onClick(event) { - if (event.defaultPrevented) { - return; - } - - // Let the browser handle links that open new tabs/windows - if ( - isModifiedEvent(event) || - !isLeftClickEvent(event) || - isTargetBlank(event) - ) { - return; - } - - // Prevent regular link behavior, which causes a browser refresh. - event.preventDefault(); - - // Push the route to the history. 
- navigate(to); - } - - // Generate the correct link href (with basename accounted for) - const href = useHref({ pathname: to }); - - const props = { ...rest, href, onClick }; - return ; -} diff --git a/ui/src/components/EuiCustomLink.tsx b/ui/src/components/EuiCustomLink.tsx new file mode 100644 index 00000000000..bf180baaa97 --- /dev/null +++ b/ui/src/components/EuiCustomLink.tsx @@ -0,0 +1,48 @@ +import React from "react"; +import { EuiLink, type EuiLinkAnchorProps } from "@elastic/eui"; +import { useNavigate, useHref, type To } from "react-router-dom"; + +interface EuiCustomLinkProps extends Omit { + to: To; +} + +const isModifiedEvent = (event: React.MouseEvent) => + !!(event.metaKey || event.altKey || event.ctrlKey || event.shiftKey); + +const isLeftClickEvent = (event: React.MouseEvent) => event.button === 0; + +const isTargetBlank = (event: React.MouseEvent) => { + const target = (event.target as Element).getAttribute("target"); + return target && target !== "_self"; +}; + +export default function EuiCustomLink({ to, ...rest }: EuiCustomLinkProps) { + // This is the key! + const navigate = useNavigate(); + + const onClick: React.MouseEventHandler = (event) => { + if (event.defaultPrevented) { + return; + } + + // Let the browser handle links that open new tabs/windows + if ( + isModifiedEvent(event) || + !isLeftClickEvent(event) || + isTargetBlank(event) + ) { + return; + } + + // Prevent regular link behavior, which causes a browser refresh. + event.preventDefault(); + + // Push the route to the history. 
+ navigate(to); + } + + // Generate the correct link href (with basename accounted for) + const href = useHref(to); + + return ; +} diff --git a/ui/src/components/FeaturesInServiceDisplay.tsx b/ui/src/components/FeaturesInServiceDisplay.tsx index bec2550a5d3..63dd447c6f3 100644 --- a/ui/src/components/FeaturesInServiceDisplay.tsx +++ b/ui/src/components/FeaturesInServiceDisplay.tsx @@ -28,10 +28,7 @@ const FeaturesInServiceList = ({ featureViews }: FeatureViewsListInterace) => { field: "featureViewName", render: (name: string) => { return ( - + {name} ); diff --git a/ui/src/components/FeaturesListDisplay.tsx b/ui/src/components/FeaturesListDisplay.tsx index 2a0628b0f56..61f57b478d6 100644 --- a/ui/src/components/FeaturesListDisplay.tsx +++ b/ui/src/components/FeaturesListDisplay.tsx @@ -21,8 +21,7 @@ const FeaturesList = ({ field: "name", render: (item: string) => ( {item} diff --git a/ui/src/components/ObjectsCountStats.tsx b/ui/src/components/ObjectsCountStats.tsx index eff3f8a2ca7..bf1dd2dc9dd 100644 --- a/ui/src/components/ObjectsCountStats.tsx +++ b/ui/src/components/ObjectsCountStats.tsx @@ -55,7 +55,7 @@ const ObjectsCountStats = () => { navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/feature-service`)} + onClick={() => navigate(`/p/${projectName}/feature-service`)} description="Feature Services→" title={data.featureServices} reverse @@ -65,7 +65,7 @@ const ObjectsCountStats = () => { navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/feature-view`)} + onClick={() => navigate(`/p/${projectName}/feature-view`)} title={data.featureViews} reverse /> @@ -74,7 +74,7 @@ const ObjectsCountStats = () => { navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/entity`)} + onClick={() => navigate(`/p/${projectName}/entity`)} title={data.entities} reverse /> @@ -83,7 +83,7 @@ const ObjectsCountStats = () => { navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/data-source`)} + onClick={() => navigate(`/p/${projectName}/data-source`)} 
title={data.dataSources} reverse /> diff --git a/ui/src/components/ProjectSelector.test.tsx b/ui/src/components/ProjectSelector.test.tsx index fc5b3c68400..dfaaab7f626 100644 --- a/ui/src/components/ProjectSelector.test.tsx +++ b/ui/src/components/ProjectSelector.test.tsx @@ -40,7 +40,7 @@ test("in a full App render, it shows the right initial project", async () => { name: "Top Level", }); - within(topLevelNavigation).getByDisplayValue("Credit Score Project"); + await within(topLevelNavigation).findByDisplayValue("Credit Score Project"); expect(options.length).toBe(1); diff --git a/ui/src/components/ProjectSelector.tsx b/ui/src/components/ProjectSelector.tsx index edbcf9d98fe..1bb7ebf85a7 100644 --- a/ui/src/components/ProjectSelector.tsx +++ b/ui/src/components/ProjectSelector.tsx @@ -22,7 +22,7 @@ const ProjectSelector = () => { const basicSelectId = useGeneratedHtmlId({ prefix: "basicSelect" }); const onChange = (e: React.ChangeEvent) => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${e.target.value}`); + navigate(`/p/${e.target.value}`); }; return ( diff --git a/ui/src/hacks/RouteAdapter.ts b/ui/src/hacks/RouteAdapter.ts deleted file mode 100644 index e7743c9d90b..00000000000 --- a/ui/src/hacks/RouteAdapter.ts +++ /dev/null @@ -1,39 +0,0 @@ -import React from "react"; -import { Location } from "history"; -import { - useLocation, - useNavigate, - Location as RouterLocation, -} from "react-router-dom"; - -// via: https://github.com/pbeshai/use-query-params/issues/196#issuecomment-996893750 -interface RouteAdapterProps { - children: React.FunctionComponent<{ - history: { - replace(location: Location): void; - push(location: Location): void; - }; - location: RouterLocation; - }>; -} - -// Via: https://github.com/pbeshai/use-query-params/blob/cd44e7fb3394620f757bfb09ff57b7f296d9a5e6/examples/react-router-6/src/index.js#L36 -const RouteAdapter = ({ children }: RouteAdapterProps) => { - const navigate = useNavigate(); - const location = useLocation(); - - const 
adaptedHistory = React.useMemo( - () => ({ - replace(location: Location) { - navigate(location, { replace: true, state: location.state }); - }, - push(location: Location) { - navigate(location, { replace: false, state: location.state }); - }, - }), - [navigate] - ); - return children && children({ history: adaptedHistory, location }); -}; - -export default RouteAdapter; diff --git a/ui/src/index.tsx b/ui/src/index.tsx index 04eda8a1ba4..9cca508fcae 100644 --- a/ui/src/index.tsx +++ b/ui/src/index.tsx @@ -96,16 +96,7 @@ root.render( { - return res.json(); - }) - }} + feastUIConfigs={{ tabsRegistry }} /> ); diff --git a/ui/src/mocks/handlers.ts b/ui/src/mocks/handlers.ts index 39f30b62a6d..dd25b6cd3fc 100644 --- a/ui/src/mocks/handlers.ts +++ b/ui/src/mocks/handlers.ts @@ -1,35 +1,25 @@ -import { rest } from "msw"; -import {readFileSync} from 'fs'; +import { http, HttpResponse } from "msw"; +import { readFileSync } from 'fs'; import path from "path"; const registry = readFileSync(path.resolve(__dirname, "../../public/registry.db")); -const projectsListWithDefaultProject = rest.get( - "/projects-list.json", - (req, res, ctx) => { - return res( - ctx.status(200), - ctx.json({ - default: "credit_score_project", - projects: [ - { - name: "Credit Score Project", - description: - "Project for credit scoring team and associated models.", - id: "credit_score_project", - registryPath: "/registry.pb", - }, - ], - }) - ); - } +const projectsListWithDefaultProject = http.get("/projects-list.json", () => + HttpResponse.json({ + default: "credit_score_project", + projects: [ + { + name: "Credit Score Project", + description: "Project for credit scoring team and associated models.", + id: "credit_score_project", + registryPath: "/registry.pb", + }, + ], + }) ); -const creditHistoryRegistry = rest.get("/registry.pb", (req, res, ctx) => { - return res( - ctx.status(200), - ctx.set('Content-Type', 'application/octet-stream'), - ctx.body(registry)); -}); +const creditHistoryRegistry = 
http.get("/registry.pb", () => + HttpResponse.arrayBuffer(registry.buffer) +); export { projectsListWithDefaultProject, creditHistoryRegistry }; diff --git a/ui/src/pages/RootProjectSelectionPage.tsx b/ui/src/pages/RootProjectSelectionPage.tsx index 5e19b6606b8..fb488e714bc 100644 --- a/ui/src/pages/RootProjectSelectionPage.tsx +++ b/ui/src/pages/RootProjectSelectionPage.tsx @@ -21,12 +21,12 @@ const RootProjectSelectionPage = () => { useEffect(() => { if (data && data.default) { // If a default is set, redirect there. - navigate(`${process.env.PUBLIC_URL || ""}/p/${data.default}`); + navigate(`/p/${data.default}`); } if (data && data.projects.length === 1) { // If there is only one project, redirect there. - navigate(`${process.env.PUBLIC_URL || ""}/p/${data.projects[0].id}`); + navigate(`/p/${data.projects[0].id}`); } }, [data, navigate]); @@ -38,7 +38,7 @@ const RootProjectSelectionPage = () => { title={`${item.name}`} description={item?.description || ""} onClick={() => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${item.id}`); + navigate(`/p/${item.id}`); }} /> diff --git a/ui/src/pages/Sidebar.tsx b/ui/src/pages/Sidebar.tsx index dac02709ba6..44cde07e79d 100644 --- a/ui/src/pages/Sidebar.tsx +++ b/ui/src/pages/Sidebar.tsx @@ -1,7 +1,7 @@ import React, { useContext, useState } from "react"; import { EuiIcon, EuiSideNav, htmlIdGenerator } from "@elastic/eui"; -import { useNavigate, useParams } from "react-router-dom"; +import { Link, useParams } from "react-router-dom"; import { useMatchSubpath } from "../hooks/useMatchSubpath"; import useLoadRegistry from "../queries/useLoadRegistry"; import RegistryPathContext from "../contexts/RegistryPathContext"; @@ -19,8 +19,6 @@ const SideNav = () => { const [isSideNavOpenOnMobile, setisSideNavOpenOnMobile] = useState(false); - const navigate = useNavigate(); - const toggleOpenOnMobile = () => { setisSideNavOpenOnMobile(!isSideNavOpenOnMobile); }; @@ -55,58 +53,48 @@ const SideNav = () => { : "" }`; - const sideNav = 
[ + const baseUrl = `/p/${projectName}`; + + const sideNav: React.ComponentProps['items'] = [ { name: "Home", id: htmlIdGenerator("basicExample")(), - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/`); - }, + renderItem: props => , items: [ { name: dataSourcesLabel, id: htmlIdGenerator("dataSources")(), icon: , - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/data-source`); - }, - isSelected: useMatchSubpath("data-source"), + renderItem: props => , + isSelected: useMatchSubpath(`${baseUrl}/data-source`), }, { name: entitiesLabel, id: htmlIdGenerator("entities")(), icon: , - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/entity`); - }, - isSelected: useMatchSubpath("entity"), + renderItem: props => , + isSelected: useMatchSubpath(`${baseUrl}/entity`), }, { name: featureViewsLabel, id: htmlIdGenerator("featureView")(), icon: , - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/feature-view`); - }, - isSelected: useMatchSubpath("feature-view"), + renderItem: props => , + isSelected: useMatchSubpath(`${baseUrl}/feature-view`), }, { name: featureServicesLabel, id: htmlIdGenerator("featureService")(), icon: , - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/feature-service`); - }, - isSelected: useMatchSubpath("feature-service"), + renderItem: props => , + isSelected: useMatchSubpath(`${baseUrl}/feature-service`), }, { name: savedDatasetsLabel, id: htmlIdGenerator("savedDatasets")(), icon: , - onClick: () => { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/data-set`); - }, - isSelected: useMatchSubpath("data-set"), + renderItem: props => , + isSelected: useMatchSubpath(`${baseUrl}/data-set`), }, ], }, diff --git a/ui/src/pages/data-sources/DataSourcesListingTable.tsx b/ui/src/pages/data-sources/DataSourcesListingTable.tsx index e4f06d6bd0a..fd1ff73deb7 100644 --- 
a/ui/src/pages/data-sources/DataSourcesListingTable.tsx +++ b/ui/src/pages/data-sources/DataSourcesListingTable.tsx @@ -20,10 +20,7 @@ const DatasourcesListingTable = ({ sortable: true, render: (name: string) => { return ( - + {name} ); diff --git a/ui/src/pages/entities/EntitiesListingTable.tsx b/ui/src/pages/entities/EntitiesListingTable.tsx index baf4ddb8e47..06190409b04 100644 --- a/ui/src/pages/entities/EntitiesListingTable.tsx +++ b/ui/src/pages/entities/EntitiesListingTable.tsx @@ -20,10 +20,7 @@ const EntitiesListingTable = ({ entities }: EntitiesListingTableProps) => { sortable: true, render: (name: string) => { return ( - + {name} ); diff --git a/ui/src/pages/entities/FeatureViewEdgesList.tsx b/ui/src/pages/entities/FeatureViewEdgesList.tsx index 8a0b6164b49..3419bfcb4b7 100644 --- a/ui/src/pages/entities/FeatureViewEdgesList.tsx +++ b/ui/src/pages/entities/FeatureViewEdgesList.tsx @@ -53,10 +53,7 @@ const FeatureViewEdgesList = ({ fvNames }: FeatureViewEdgesListInterace) => { field: "", render: ({ name }: { name: string }) => { return ( - + {name} ); diff --git a/ui/src/pages/feature-services/FeatureServiceListingTable.tsx b/ui/src/pages/feature-services/FeatureServiceListingTable.tsx index 13ffa764092..69d4d1f969d 100644 --- a/ui/src/pages/feature-services/FeatureServiceListingTable.tsx +++ b/ui/src/pages/feature-services/FeatureServiceListingTable.tsx @@ -30,10 +30,7 @@ const FeatureServiceListingTable = ({ field: "spec.name", render: (name: string) => { return ( - + {name} ); diff --git a/ui/src/pages/feature-services/FeatureServiceOverviewTab.tsx b/ui/src/pages/feature-services/FeatureServiceOverviewTab.tsx index 4d3d350f084..fcb1dc018b3 100644 --- a/ui/src/pages/feature-services/FeatureServiceOverviewTab.tsx +++ b/ui/src/pages/feature-services/FeatureServiceOverviewTab.tsx @@ -109,7 +109,7 @@ const FeatureServiceOverviewTab = () => { tags={data.spec.tags} createLink={(key, value) => { return ( - `${process.env.PUBLIC_URL || 
""}/p/${projectName}/feature-service?` + + `/p/${projectName}/feature-service?` + encodeSearchQueryString(`${key}:${value}`) ); }} @@ -133,7 +133,7 @@ const FeatureServiceOverviewTab = () => { color="primary" onClick={() => { navigate( - `${process.env.PUBLIC_URL || ""}/p/${projectName}/entity/${entity.name}` + `/p/${projectName}/entity/${entity.name}` ); }} onClickAriaLabel={entity.name} diff --git a/ui/src/pages/feature-views/ConsumingFeatureServicesList.tsx b/ui/src/pages/feature-views/ConsumingFeatureServicesList.tsx index bb9961c19ca..603a4d96ba4 100644 --- a/ui/src/pages/feature-views/ConsumingFeatureServicesList.tsx +++ b/ui/src/pages/feature-views/ConsumingFeatureServicesList.tsx @@ -18,10 +18,7 @@ const ConsumingFeatureServicesList = ({ field: "", render: ({ name }: { name: string }) => { return ( - + {name} ); diff --git a/ui/src/pages/feature-views/FeatureViewListingTable.tsx b/ui/src/pages/feature-views/FeatureViewListingTable.tsx index ff1a31c4162..02756492c91 100644 --- a/ui/src/pages/feature-views/FeatureViewListingTable.tsx +++ b/ui/src/pages/feature-views/FeatureViewListingTable.tsx @@ -31,10 +31,7 @@ const FeatureViewListingTable = ({ sortable: true, render: (name: string, item: genericFVType) => { return ( - + {name} {(item.type === "ondemand" && ondemand) || (item.type === "stream" && stream)} ); diff --git a/ui/src/pages/feature-views/RegularFeatureViewOverviewTab.tsx b/ui/src/pages/feature-views/RegularFeatureViewOverviewTab.tsx index cde4f46d4ed..3bbb906e05b 100644 --- a/ui/src/pages/feature-views/RegularFeatureViewOverviewTab.tsx +++ b/ui/src/pages/feature-views/RegularFeatureViewOverviewTab.tsx @@ -96,7 +96,7 @@ const RegularFeatureViewOverviewTab = ({ { - navigate(`${process.env.PUBLIC_URL || ""}/p/${projectName}/entity/${entity}`); + navigate(`/p/${projectName}/entity/${entity}`); }} onClickAriaLabel={entity} data-test-sub="testExample1" @@ -134,7 +134,7 @@ const RegularFeatureViewOverviewTab = ({ tags={data.spec.tags} createLink={(key, 
value) => { return ( - `${process.env.PUBLIC_URL || ""}/p/${projectName}/feature-view?` + + `/p/${projectName}/feature-view?` + encodeSearchQueryString(`${key}:${value}`) ); }} diff --git a/ui/src/pages/feature-views/StreamFeatureViewOverviewTab.tsx b/ui/src/pages/feature-views/StreamFeatureViewOverviewTab.tsx index b4514a5edd5..9aff3d59f3f 100644 --- a/ui/src/pages/feature-views/StreamFeatureViewOverviewTab.tsx +++ b/ui/src/pages/feature-views/StreamFeatureViewOverviewTab.tsx @@ -96,8 +96,7 @@ const StreamFeatureViewOverviewTab = ({ {inputGroup?.name} diff --git a/ui/src/pages/feature-views/components/FeatureViewProjectionDisplayPanel.tsx b/ui/src/pages/feature-views/components/FeatureViewProjectionDisplayPanel.tsx index 2a68cc49b51..104ef0f93be 100644 --- a/ui/src/pages/feature-views/components/FeatureViewProjectionDisplayPanel.tsx +++ b/ui/src/pages/feature-views/components/FeatureViewProjectionDisplayPanel.tsx @@ -31,8 +31,7 @@ const FeatureViewProjectionDisplayPanel = (featureViewProjection: RequestDataDis {featureViewProjection?.featureViewName} diff --git a/ui/src/pages/feature-views/components/RequestDataDisplayPanel.tsx b/ui/src/pages/feature-views/components/RequestDataDisplayPanel.tsx index 8ec973c3dad..6893dfd6a32 100644 --- a/ui/src/pages/feature-views/components/RequestDataDisplayPanel.tsx +++ b/ui/src/pages/feature-views/components/RequestDataDisplayPanel.tsx @@ -39,8 +39,7 @@ const RequestDataDisplayPanel = ({ {requestDataSource?.name} diff --git a/ui/src/pages/features/FeatureOverviewTab.tsx b/ui/src/pages/features/FeatureOverviewTab.tsx index cc7879b0383..eb101fe3955 100644 --- a/ui/src/pages/features/FeatureOverviewTab.tsx +++ b/ui/src/pages/features/FeatureOverviewTab.tsx @@ -63,8 +63,8 @@ const FeatureOverviewTab = () => { FeatureView + to={`/p/${projectName}/feature-view/${FeatureViewName}`} + > {FeatureViewName} diff --git a/ui/src/pages/saved-data-sets/DatasetsListingTable.tsx b/ui/src/pages/saved-data-sets/DatasetsListingTable.tsx index 
af794a35f98..7b73e9cd6dc 100644 --- a/ui/src/pages/saved-data-sets/DatasetsListingTable.tsx +++ b/ui/src/pages/saved-data-sets/DatasetsListingTable.tsx @@ -19,10 +19,7 @@ const DatasetsListingTable = ({ datasets }: DatasetsListingTableProps) => { sortable: true, render: (name: string) => { return ( - + {name} ); diff --git a/ui/src/queries/useLoadRegistry.ts b/ui/src/queries/useLoadRegistry.ts index be8ab65a8cd..88274b47131 100644 --- a/ui/src/queries/useLoadRegistry.ts +++ b/ui/src/queries/useLoadRegistry.ts @@ -52,7 +52,7 @@ const useLoadRegistry = (url: string) => { // }); return { - project: objects.projectMetadata[0].project!, + project: objects.projects[0].spec?.name!, objects, mergedFVMap, mergedFVList, diff --git a/ui/src/setupTests.ts b/ui/src/setupTests.ts index 8f2609b7b3e..f30351b9164 100644 --- a/ui/src/setupTests.ts +++ b/ui/src/setupTests.ts @@ -3,3 +3,7 @@ // expect(element).toHaveTextContent(/react/i) // learn more: https://github.com/testing-library/jest-dom import '@testing-library/jest-dom'; +import { BroadcastChannel } from 'worker_threads'; + +// BroadcastChannel is missing from @happy-dom/jest-environment globals +Object.assign(global, { BroadcastChannel }); diff --git a/ui/src/test-utils.tsx b/ui/src/test-utils.tsx index c180b01872c..0130686252d 100644 --- a/ui/src/test-utils.tsx +++ b/ui/src/test-utils.tsx @@ -2,8 +2,8 @@ import React from "react"; import { render, RenderOptions } from "@testing-library/react"; import { QueryClient, QueryClientProvider } from "react-query"; import { QueryParamProvider } from "use-query-params"; +import { ReactRouter6Adapter } from 'use-query-params/adapters/react-router-6'; import { MemoryRouter as Router } from "react-router-dom"; -import RouteAdapter from "./hacks/RouteAdapter"; interface ProvidersProps { children: React.ReactNode; @@ -14,10 +14,12 @@ const queryClient = new QueryClient(); const AllTheProviders = ({ children }: ProvidersProps) => { return ( - - + + {children} diff --git 
a/ui/src/utils/timestamp.ts b/ui/src/utils/timestamp.ts index 869d24870f0..4432545457c 100644 --- a/ui/src/utils/timestamp.ts +++ b/ui/src/utils/timestamp.ts @@ -1,9 +1,9 @@ -import long from 'long'; +import Long from 'long'; import { google } from '../protos'; export function toDate(ts: google.protobuf.ITimestamp) { var seconds: number; - if (ts.seconds instanceof long) { + if (ts.seconds instanceof Long) { seconds = ts.seconds.low } else { seconds = ts.seconds!; diff --git a/ui/yarn.lock b/ui/yarn.lock index 90ba33269eb..32dcfe3996b 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -40,6 +40,15 @@ "@babel/highlight" "^7.25.9" picocolors "^1.0.0" +"@babel/code-frame@^7.26.2": + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" + integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== + dependencies: + "@babel/helper-validator-identifier" "^7.25.9" + js-tokens "^4.0.0" + picocolors "^1.0.0" + "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.7", "@babel/compat-data@^7.25.8": version "7.25.8" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.8.tgz#0376e83df5ab0eb0da18885c0140041f0747a402" @@ -50,7 +59,12 @@ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.9.tgz#24b01c5db6a3ebf85661b4fb4a946a9bccc72ac8" integrity sha512-yD+hEuJ/+wAJ4Ox2/rpNv5HIuPG82x3ZlQvYVn8iYCprdxzE7P1udpGF1jyjQVBU4dgznN+k2h103vxZ7NdPyw== -"@babel/core@^7.1.0", "@babel/core@^7.11.1", "@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.8.0": +"@babel/compat-data@^7.26.5": + version "7.26.5" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.5.tgz#df93ac37f4417854130e21d72c66ff3d4b897fc7" + integrity sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg== + +"@babel/core@^7.1.0", "@babel/core@^7.11.1", 
"@babel/core@^7.12.3", "@babel/core@^7.16.0": version "7.25.8" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.8.tgz#a57137d2a51bbcffcfaeba43cb4dd33ae3e0e1c6" integrity sha512-Oixnb+DzmRT30qu9d3tJSQkxuygWm32DFykT4bRoORPa9hZ/L4KhVB/XiRm6KG+roIEM7DBQlmg27kw2HZkdZg== @@ -71,6 +85,27 @@ json5 "^2.2.3" semver "^6.3.1" +"@babel/core@^7.11.6", "@babel/core@^7.23.9": + version "7.26.7" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.7.tgz#0439347a183b97534d52811144d763a17f9d2b24" + integrity sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.26.2" + "@babel/generator" "^7.26.5" + "@babel/helper-compilation-targets" "^7.26.5" + "@babel/helper-module-transforms" "^7.26.0" + "@babel/helpers" "^7.26.7" + "@babel/parser" "^7.26.7" + "@babel/template" "^7.25.9" + "@babel/traverse" "^7.26.7" + "@babel/types" "^7.26.7" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + "@babel/core@^7.21.3", "@babel/core@^7.25.8": version "7.25.9" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.9.tgz#855a4cddcec4158f3f7afadacdab2a7de8af7434" @@ -121,6 +156,17 @@ "@jridgewell/trace-mapping" "^0.3.25" jsesc "^3.0.2" +"@babel/generator@^7.26.5": + version "7.26.5" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.5.tgz#e44d4ab3176bbcaf78a5725da5f1dc28802a9458" + integrity sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw== + dependencies: + "@babel/parser" "^7.26.5" + "@babel/types" "^7.26.5" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^3.0.2" + "@babel/helper-annotate-as-pure@^7.18.6", "@babel/helper-annotate-as-pure@^7.25.7": version "7.25.7" resolved 
"https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.7.tgz#63f02dbfa1f7cb75a9bdb832f300582f30bb8972" @@ -173,6 +219,17 @@ lru-cache "^5.1.1" semver "^6.3.1" +"@babel/helper-compilation-targets@^7.26.5": + version "7.26.5" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz#75d92bb8d8d51301c0d49e52a65c9a7fe94514d8" + integrity sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA== + dependencies: + "@babel/compat-data" "^7.26.5" + "@babel/helper-validator-option" "^7.25.9" + browserslist "^4.24.0" + lru-cache "^5.1.1" + semver "^6.3.1" + "@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.21.0", "@babel/helper-create-class-features-plugin@^7.25.7": version "7.25.7" resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.7.tgz#5d65074c76cae75607421c00d6bd517fe1892d6b" @@ -280,6 +337,15 @@ "@babel/helper-validator-identifier" "^7.25.9" "@babel/traverse" "^7.25.9" +"@babel/helper-module-transforms@^7.26.0": + version "7.26.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" + integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== + dependencies: + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + "@babel/traverse" "^7.25.9" + "@babel/helper-optimise-call-expression@^7.25.7": version "7.25.7" resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.7.tgz#1de1b99688e987af723eed44fa7fc0ee7b97d77a" @@ -436,6 +502,14 @@ "@babel/template" "^7.25.9" "@babel/types" "^7.25.9" +"@babel/helpers@^7.26.7": + version "7.26.7" + resolved 
"https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.7.tgz#fd1d2a7c431b6e39290277aacfd8367857c576a4" + integrity sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A== + dependencies: + "@babel/template" "^7.25.9" + "@babel/types" "^7.26.7" + "@babel/highlight@^7.25.7": version "7.25.7" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.25.7.tgz#20383b5f442aa606e7b5e3043b0b1aafe9f37de5" @@ -463,6 +537,13 @@ dependencies: "@babel/types" "^7.25.8" +"@babel/parser@^7.23.9", "@babel/parser@^7.26.5", "@babel/parser@^7.26.7": + version "7.26.7" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.7.tgz#e114cd099e5f7d17b05368678da0fb9f69b3385c" + integrity sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w== + dependencies: + "@babel/types" "^7.26.7" + "@babel/parser@^7.25.9": version "7.25.9" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.25.9.tgz#8fcaa079ac7458facfddc5cd705cc8005e4d3817" @@ -704,7 +785,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.7" -"@babel/plugin-syntax-jsx@^7.25.9": +"@babel/plugin-syntax-jsx@^7.25.9", "@babel/plugin-syntax-jsx@^7.7.2": version "7.25.9" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz#a34313a178ea56f1951599b929c1ceacee719290" integrity sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA== @@ -1869,7 +1950,7 @@ "@babel/plugin-transform-modules-commonjs" "^7.25.9" "@babel/plugin-transform-typescript" "^7.25.9" -"@babel/runtime@^7.0.0", "@babel/runtime@^7.12.13", "@babel/runtime@^7.7.6", "@babel/runtime@^7.9.2": +"@babel/runtime@^7.0.0", "@babel/runtime@^7.12.13", "@babel/runtime@^7.9.2": version "7.16.7" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.16.7.tgz#03ff99f64106588c9c403c6ecb8c3bafbbdff1fa" integrity 
sha512-9E9FJowqAsytyOY6LG+1KuueckRL+aQW+mKvXRXnuFGyRAyepJPmEo9vgMfXUA6O9u3IeEdv9MAkppFcaQwogQ== @@ -1915,7 +1996,7 @@ "@babel/parser" "^7.25.9" "@babel/types" "^7.25.9" -"@babel/traverse@^7.25.7", "@babel/traverse@^7.7.2": +"@babel/traverse@^7.25.7": version "7.25.7" resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.25.7.tgz#83e367619be1cab8e4f2892ef30ba04c26a40fa8" integrity sha512-jatJPT1Zjqvh/1FyJs6qAHL+Dzb7sTb+xr7Q+gM1b+1oBsMsQQ4FkVKb6dFlJvLlVssqkRzV05Jzervt9yhnzg== @@ -1941,6 +2022,19 @@ debug "^4.3.1" globals "^11.1.0" +"@babel/traverse@^7.26.7": + version "7.26.7" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.7.tgz#99a0a136f6a75e7fb8b0a1ace421e0b25994b8bb" + integrity sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA== + dependencies: + "@babel/code-frame" "^7.26.2" + "@babel/generator" "^7.26.5" + "@babel/parser" "^7.26.7" + "@babel/template" "^7.25.9" + "@babel/types" "^7.26.7" + debug "^4.3.1" + globals "^11.1.0" + "@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.25.7", "@babel/types@^7.25.8", "@babel/types@^7.3.3", "@babel/types@^7.4.4": version "7.25.8" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.25.8.tgz#5cf6037258e8a9bcad533f4979025140cb9993e1" @@ -1958,6 +2052,14 @@ "@babel/helper-string-parser" "^7.25.9" "@babel/helper-validator-identifier" "^7.25.9" +"@babel/types@^7.26.5", "@babel/types@^7.26.7": + version "7.26.7" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.7.tgz#5e2b89c0768e874d4d061961f3a5a153d71dc17a" + integrity sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg== + dependencies: + "@babel/helper-string-parser" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + "@base2/pretty-print-object@1.0.1": version "1.0.1" resolved 
"https://registry.yarnpkg.com/@base2/pretty-print-object/-/pretty-print-object-1.0.1.tgz#371ba8be66d556812dc7fb169ebc3c08378f69d4" @@ -1968,6 +2070,28 @@ resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== +"@bundled-es-modules/cookie@^2.0.1": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz#b41376af6a06b3e32a15241d927b840a9b4de507" + integrity sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw== + dependencies: + cookie "^0.7.2" + +"@bundled-es-modules/statuses@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz#761d10f44e51a94902c4da48675b71a76cc98872" + integrity sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg== + dependencies: + statuses "^2.0.1" + +"@bundled-es-modules/tough-cookie@^0.1.6": + version "0.1.6" + resolved "https://registry.yarnpkg.com/@bundled-es-modules/tough-cookie/-/tough-cookie-0.1.6.tgz#fa9cd3cedfeecd6783e8b0d378b4a99e52bde5d3" + integrity sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw== + dependencies: + "@types/tough-cookie" "^4.0.5" + tough-cookie "^4.1.4" + "@csstools/normalize.css@*": version "12.1.1" resolved "https://registry.yarnpkg.com/@csstools/normalize.css/-/normalize.css-12.1.1.tgz#f0ad221b7280f3fc814689786fd9ee092776ef8f" @@ -2273,6 +2397,18 @@ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.1.tgz#de633db3ec2ef6a3c89e2f19038063e8a122e2c2" integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q== +"@happy-dom/jest-environment@^16.7.3": + version "16.7.3" + resolved 
"https://registry.yarnpkg.com/@happy-dom/jest-environment/-/jest-environment-16.7.3.tgz#e32b5622dc838ea4c178a7f999fd46439d607862" + integrity sha512-82cNPOd+jJVVE3dSdlJLy7LXm41wc5rP4sMotMD96jLfL5KKA5s1fSPh0xo2QRHilk6du5seqm1xDXZwyuH++A== + dependencies: + "@jest/environment" "^29.4.0" + "@jest/fake-timers" "^29.4.0" + "@jest/types" "^29.4.0" + happy-dom "^16.7.3" + jest-mock "^29.4.0" + jest-util "^29.4.0" + "@hello-pangea/dnd@^16.6.0": version "16.6.0" resolved "https://registry.yarnpkg.com/@hello-pangea/dnd/-/dnd-16.6.0.tgz#7509639c7bd13f55e537b65a9dcfcd54e7c99ac7" @@ -2305,6 +2441,38 @@ resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz#4a2868d75d6d6963e423bcf90b7fd1be343409d3" integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA== +"@inquirer/confirm@^5.0.0": + version "5.1.4" + resolved "https://registry.yarnpkg.com/@inquirer/confirm/-/confirm-5.1.4.tgz#3e2c9bfdf80331676196d8dbb2261103a67d0e9d" + integrity sha512-EsiT7K4beM5fN5Mz6j866EFA9+v9d5o9VUra3hrg8zY4GHmCS8b616FErbdo5eyKoVotBQkHzMIeeKYsKDStDw== + dependencies: + "@inquirer/core" "^10.1.5" + "@inquirer/type" "^3.0.3" + +"@inquirer/core@^10.1.5": + version "10.1.5" + resolved "https://registry.yarnpkg.com/@inquirer/core/-/core-10.1.5.tgz#7271c177340f77c2e231704227704d8cdf497747" + integrity sha512-/vyCWhET0ktav/mUeBqJRYTwmjFPIKPRYb3COAw7qORULgipGSUO2vL32lQKki3UxDKJ8BvuEbokaoyCA6YlWw== + dependencies: + "@inquirer/figures" "^1.0.10" + "@inquirer/type" "^3.0.3" + ansi-escapes "^4.3.2" + cli-width "^4.1.0" + mute-stream "^2.0.0" + signal-exit "^4.1.0" + wrap-ansi "^6.2.0" + yoctocolors-cjs "^2.1.2" + +"@inquirer/figures@^1.0.10": + version "1.0.10" + resolved "https://registry.yarnpkg.com/@inquirer/figures/-/figures-1.0.10.tgz#e3676a51c9c51aaabcd6ba18a28e82b98417db37" + integrity sha512-Ey6176gZmeqZuY/W/nZiUyvmb1/qInjcpiZjXWi6nON+nxJpD1bxtSoBxNliGISae32n6OwbY+TSXPZ1CfS4bw== + +"@inquirer/type@^3.0.3": + version 
"3.0.3" + resolved "https://registry.yarnpkg.com/@inquirer/type/-/type-3.0.3.tgz#aa9cb38568f23f772b417c972f6a2d906647a6af" + integrity sha512-I4VIHFxUuY1bshGbXZTxCmhwaaEst9s/lll3ekok+o1Z26/ZUKdx8y1b7lsoG6rtsBDwEGfiBJ2SfirjoISLpg== + "@istanbuljs/load-nyc-config@^1.0.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" @@ -2316,176 +2484,169 @@ js-yaml "^3.13.1" resolve-from "^5.0.0" -"@istanbuljs/schema@^0.1.2": +"@istanbuljs/schema@^0.1.2", "@istanbuljs/schema@^0.1.3": version "0.1.3" resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== -"@jest/console@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/console/-/console-27.5.1.tgz#260fe7239602fe5130a94f1aa386eff54b014bba" - integrity sha512-kZ/tNpS3NXn0mlXXXPNuDZnb4c0oZ20r4K5eemM2k30ZC3G0T02nXUvyhf5YdbXWHPEJLc9qGLxEZ216MdL+Zg== - dependencies: - "@jest/types" "^27.5.1" - "@types/node" "*" - chalk "^4.0.0" - jest-message-util "^27.5.1" - jest-util "^27.5.1" - slash "^3.0.0" - -"@jest/console@^28.1.3": - version "28.1.3" - resolved "https://registry.yarnpkg.com/@jest/console/-/console-28.1.3.tgz#2030606ec03a18c31803b8a36382762e447655df" - integrity sha512-QPAkP5EwKdK/bxIr6C1I4Vs0rm2nHiANzj/Z5X2JQkrZo6IqvC4ldZ9K95tF0HdidhA8Bo6egxSzUFPYKcEXLw== +"@jest/console@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.7.0.tgz#cd4822dbdb84529265c5a2bdb529a3c9cc950ffc" + integrity sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg== dependencies: - "@jest/types" "^28.1.3" + "@jest/types" "^29.6.3" "@types/node" "*" chalk "^4.0.0" - jest-message-util "^28.1.3" - jest-util "^28.1.3" + jest-message-util "^29.7.0" + jest-util "^29.7.0" slash 
"^3.0.0" -"@jest/core@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/core/-/core-27.5.1.tgz#267ac5f704e09dc52de2922cbf3af9edcd64b626" - integrity sha512-AK6/UTrvQD0Cd24NSqmIA6rKsu0tKIxfiCducZvqxYdmMisOYAsdItspT+fQDQYARPf8XgjAFZi0ogW2agH5nQ== +"@jest/core@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.7.0.tgz#b6cccc239f30ff36609658c5a5e2291757ce448f" + integrity sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg== dependencies: - "@jest/console" "^27.5.1" - "@jest/reporters" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/transform" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/console" "^29.7.0" + "@jest/reporters" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" ansi-escapes "^4.2.1" chalk "^4.0.0" - emittery "^0.8.1" + ci-info "^3.2.0" exit "^0.1.2" graceful-fs "^4.2.9" - jest-changed-files "^27.5.1" - jest-config "^27.5.1" - jest-haste-map "^27.5.1" - jest-message-util "^27.5.1" - jest-regex-util "^27.5.1" - jest-resolve "^27.5.1" - jest-resolve-dependencies "^27.5.1" - jest-runner "^27.5.1" - jest-runtime "^27.5.1" - jest-snapshot "^27.5.1" - jest-util "^27.5.1" - jest-validate "^27.5.1" - jest-watcher "^27.5.1" + jest-changed-files "^29.7.0" + jest-config "^29.7.0" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-resolve-dependencies "^29.7.0" + jest-runner "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + jest-watcher "^29.7.0" micromatch "^4.0.4" - rimraf "^3.0.0" + pretty-format "^29.7.0" slash "^3.0.0" strip-ansi "^6.0.0" -"@jest/environment@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-27.5.1.tgz#d7425820511fe7158abbecc010140c3fd3be9c74" - integrity 
sha512-/WQjhPJe3/ghaol/4Bq480JKXV/Rfw8nQdN7f41fM8VDHLcxKXou6QyXAh3EFr9/bVG3x74z1NWDkP87EiY8gA== +"@jest/environment@^29.4.0", "@jest/environment@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7" + integrity sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw== dependencies: - "@jest/fake-timers" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" - jest-mock "^27.5.1" + jest-mock "^29.7.0" -"@jest/fake-timers@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-27.5.1.tgz#76979745ce0579c8a94a4678af7a748eda8ada74" - integrity sha512-/aPowoolwa07k7/oM3aASneNeBGCmGQsc3ugN4u6s4C/+s5M64MFo/+djTdiwcbQlRfFElGuDXWzaWj6QgKObQ== +"@jest/expect-utils@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.7.0.tgz#023efe5d26a8a70f21677d0a1afc0f0a44e3a1c6" + integrity sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA== dependencies: - "@jest/types" "^27.5.1" - "@sinonjs/fake-timers" "^8.0.1" + jest-get-type "^29.6.3" + +"@jest/expect@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.7.0.tgz#76a3edb0cb753b70dfbfe23283510d3d45432bf2" + integrity sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ== + dependencies: + expect "^29.7.0" + jest-snapshot "^29.7.0" + +"@jest/fake-timers@^29.4.0", "@jest/fake-timers@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.7.0.tgz#fd91bf1fffb16d7d0d24a426ab1a47a49881a565" + integrity sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ== + dependencies: + "@jest/types" "^29.6.3" + "@sinonjs/fake-timers" "^10.0.2" 
"@types/node" "*" - jest-message-util "^27.5.1" - jest-mock "^27.5.1" - jest-util "^27.5.1" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-util "^29.7.0" -"@jest/globals@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-27.5.1.tgz#7ac06ce57ab966566c7963431cef458434601b2b" - integrity sha512-ZEJNB41OBQQgGzgyInAv0UUfDDj3upmHydjieSxFvTRuZElrx7tXg/uVQ5hYVEwiXs3+aMsAeEc9X7xiSKCm4Q== +"@jest/globals@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.7.0.tgz#8d9290f9ec47ff772607fa864ca1d5a2efae1d4d" + integrity sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ== dependencies: - "@jest/environment" "^27.5.1" - "@jest/types" "^27.5.1" - expect "^27.5.1" + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" + "@jest/types" "^29.6.3" + jest-mock "^29.7.0" -"@jest/reporters@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-27.5.1.tgz#ceda7be96170b03c923c37987b64015812ffec04" - integrity sha512-cPXh9hWIlVJMQkVk84aIvXuBB4uQQmFqZiacloFuGiP3ah1sbCxCosidXFDfqG8+6fO1oR2dTJTlsOy4VFmUfw== +"@jest/reporters@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.7.0.tgz#04b262ecb3b8faa83b0b3d321623972393e8f4c7" + integrity sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg== dependencies: "@bcoe/v8-coverage" "^0.2.3" - "@jest/console" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/transform" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/console" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" "@types/node" "*" chalk "^4.0.0" collect-v8-coverage "^1.0.0" exit "^0.1.2" - glob "^7.1.2" + glob "^7.1.3" graceful-fs "^4.2.9" istanbul-lib-coverage "^3.0.0" - istanbul-lib-instrument "^5.1.0" + istanbul-lib-instrument 
"^6.0.0" istanbul-lib-report "^3.0.0" istanbul-lib-source-maps "^4.0.0" istanbul-reports "^3.1.3" - jest-haste-map "^27.5.1" - jest-resolve "^27.5.1" - jest-util "^27.5.1" - jest-worker "^27.5.1" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + jest-worker "^29.7.0" slash "^3.0.0" - source-map "^0.6.0" string-length "^4.0.1" - terminal-link "^2.0.0" - v8-to-istanbul "^8.1.0" + strip-ansi "^6.0.0" + v8-to-istanbul "^9.0.1" -"@jest/schemas@^28.1.3": - version "28.1.3" - resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-28.1.3.tgz#ad8b86a66f11f33619e3d7e1dcddd7f2d40ff905" - integrity sha512-/l/VWsdt/aBXgjshLWOFyFt3IVdYypu5y2Wn2rOO1un6nkqIn8SLXzgIMYXFyYsRWDyF5EthmKJMIdJvk08grg== +"@jest/schemas@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03" + integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA== dependencies: - "@sinclair/typebox" "^0.24.1" + "@sinclair/typebox" "^0.27.8" -"@jest/source-map@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-27.5.1.tgz#6608391e465add4205eae073b55e7f279e04e8cf" - integrity sha512-y9NIHUYF3PJRlHk98NdC/N1gl88BL08aQQgu4k4ZopQkCw9t9cV8mtl3TV8b/YCB8XaVTFrmUTAJvjsntDireg== +"@jest/source-map@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.3.tgz#d90ba772095cf37a34a5eb9413f1b562a08554c4" + integrity sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw== dependencies: + "@jridgewell/trace-mapping" "^0.3.18" callsites "^3.0.0" graceful-fs "^4.2.9" - source-map "^0.6.0" - -"@jest/test-result@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-27.5.1.tgz#56a6585fa80f7cdab72b8c5fc2e871d03832f5bb" - integrity 
sha512-EW35l2RYFUcUQxFJz5Cv5MTOxlJIQs4I7gxzi2zVU7PJhOwfYq1MdC5nhSmYjX1gmMmLPvB3sIaC+BkcHRBfag== - dependencies: - "@jest/console" "^27.5.1" - "@jest/types" "^27.5.1" - "@types/istanbul-lib-coverage" "^2.0.0" - collect-v8-coverage "^1.0.0" -"@jest/test-result@^28.1.3": - version "28.1.3" - resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-28.1.3.tgz#5eae945fd9f4b8fcfce74d239e6f725b6bf076c5" - integrity sha512-kZAkxnSE+FqE8YjW8gNuoVkkC9I7S1qmenl8sGcDOLropASP+BkcGKwhXoyqQuGOGeYY0y/ixjrd/iERpEXHNg== +"@jest/test-result@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.7.0.tgz#8db9a80aa1a097bb2262572686734baed9b1657c" + integrity sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA== dependencies: - "@jest/console" "^28.1.3" - "@jest/types" "^28.1.3" + "@jest/console" "^29.7.0" + "@jest/types" "^29.6.3" "@types/istanbul-lib-coverage" "^2.0.0" collect-v8-coverage "^1.0.0" -"@jest/test-sequencer@^27.5.1": - version "27.5.1" - resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-27.5.1.tgz#4057e0e9cea4439e544c6353c6affe58d095745b" - integrity sha512-LCheJF7WB2+9JuCS7VB/EmGIdQuhtqjRNI9A43idHv3E4KltCTsPsLxvdaubFHSYwY/fNjMWjl6vNRhDiN7vpQ== +"@jest/test-sequencer@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz#6cef977ce1d39834a3aea887a1726628a6f072ce" + integrity sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw== dependencies: - "@jest/test-result" "^27.5.1" + "@jest/test-result" "^29.7.0" graceful-fs "^4.2.9" - jest-haste-map "^27.5.1" - jest-runtime "^27.5.1" + jest-haste-map "^29.7.0" + slash "^3.0.0" "@jest/transform@^27.5.1": version "27.5.1" @@ -2508,6 +2669,27 @@ source-map "^0.6.1" write-file-atomic "^3.0.0" +"@jest/transform@^29.7.0": + version "29.7.0" + resolved 
"https://registry.yarnpkg.com/@jest/transform/-/transform-29.7.0.tgz#df2dd9c346c7d7768b8a06639994640c642e284c" + integrity sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw== + dependencies: + "@babel/core" "^7.11.6" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" + babel-plugin-istanbul "^6.1.1" + chalk "^4.0.0" + convert-source-map "^2.0.0" + fast-json-stable-stringify "^2.1.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-regex-util "^29.6.3" + jest-util "^29.7.0" + micromatch "^4.0.4" + pirates "^4.0.4" + slash "^3.0.0" + write-file-atomic "^4.0.2" + "@jest/types@^27.5.1": version "27.5.1" resolved "https://registry.yarnpkg.com/@jest/types/-/types-27.5.1.tgz#3c79ec4a8ba61c170bf937bcf9e98a9df175ec80" @@ -2519,12 +2701,12 @@ "@types/yargs" "^16.0.0" chalk "^4.0.0" -"@jest/types@^28.1.3": - version "28.1.3" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-28.1.3.tgz#b05de80996ff12512bc5ceb1d208285a7d11748b" - integrity sha512-RyjiyMUZrKz/c+zlMFO1pm70DcIlST8AeWTkoUdZevew44wcNZQHsEVOiCVtgVnlFFD82FPaXycys58cf2muVQ== +"@jest/types@^29.4.0", "@jest/types@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59" + integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw== dependencies: - "@jest/schemas" "^28.1.3" + "@jest/schemas" "^29.6.3" "@types/istanbul-lib-coverage" "^2.0.0" "@types/istanbul-reports" "^3.0.0" "@types/node" "*" @@ -2563,7 +2745,7 @@ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== -"@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": +"@jridgewell/trace-mapping@^0.3.12", 
"@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.20", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": version "0.3.25" resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== @@ -2590,25 +2772,17 @@ dependencies: unist-util-visit "^1.4.1" -"@mswjs/cookies@^0.1.7": - version "0.1.7" - resolved "https://registry.yarnpkg.com/@mswjs/cookies/-/cookies-0.1.7.tgz#d334081b2c51057a61c1dd7b76ca3cac02251651" - integrity sha512-bDg1ReMBx+PYDB4Pk7y1Q07Zz1iKIEUWQpkEXiA2lEWg9gvOZ8UBmGXilCEUvyYoRFlmr/9iXTRR69TrgSwX/Q== - dependencies: - "@types/set-cookie-parser" "^2.4.0" - set-cookie-parser "^2.4.6" - -"@mswjs/interceptors@^0.12.7": - version "0.12.7" - resolved "https://registry.yarnpkg.com/@mswjs/interceptors/-/interceptors-0.12.7.tgz#0d1cd4cd31a0f663e0455993951201faa09d0909" - integrity sha512-eGjZ3JRAt0Fzi5FgXiV/P3bJGj0NqsN7vBS0J0FO2AQRQ0jCKQS4lEFm4wvlSgKQNfeuc/Vz6d81VtU3Gkx/zg== +"@mswjs/interceptors@^0.37.0": + version "0.37.5" + resolved "https://registry.yarnpkg.com/@mswjs/interceptors/-/interceptors-0.37.5.tgz#9ce40c56be02b43fcbdb51b63f47e69fc4aaabe6" + integrity sha512-AAwRb5vXFcY4L+FvZ7LZusDuZ0vEe0Zm8ohn1FM6/X7A3bj4mqmkAcGRWuvC2JwSygNwHAAmMnAI73vPHeqsHA== dependencies: - "@open-draft/until" "^1.0.3" - "@xmldom/xmldom" "^0.7.2" - debug "^4.3.2" - headers-utils "^3.0.2" - outvariant "^1.2.0" - strict-event-emitter "^0.2.0" + "@open-draft/deferred-promise" "^2.2.0" + "@open-draft/logger" "^0.3.0" + "@open-draft/until" "^2.0.0" + is-node-process "^1.2.0" + outvariant "^1.4.3" + strict-event-emitter "^0.5.1" "@nicolo-ribaudo/eslint-scope-5-internals@5.1.1-v1": version "5.1.1-v1" @@ -2638,10 +2812,23 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" -"@open-draft/until@^1.0.3": - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/@open-draft/until/-/until-1.0.3.tgz#db9cc719191a62e7d9200f6e7bab21c5b848adca" - integrity sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q== +"@open-draft/deferred-promise@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz#4a822d10f6f0e316be4d67b4d4f8c9a124b073bd" + integrity sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA== + +"@open-draft/logger@^0.3.0": + version "0.3.0" + resolved "https://registry.yarnpkg.com/@open-draft/logger/-/logger-0.3.0.tgz#2b3ab1242b360aa0adb28b85f5d7da1c133a0954" + integrity sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ== + dependencies: + is-node-process "^1.2.0" + outvariant "^1.4.0" + +"@open-draft/until@^2.0.0", "@open-draft/until@^2.1.0": + version "2.1.0" + resolved "https://registry.yarnpkg.com/@open-draft/until/-/until-2.1.0.tgz#0acf32f470af2ceaf47f095cdecd40d68666efda" + integrity sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg== "@pmmmwh/react-refresh-webpack-plugin@^0.5.3": version "0.5.15" @@ -2709,6 +2896,11 @@ resolved "https://registry.yarnpkg.com/@protobufjs/utf8/-/utf8-1.1.0.tgz#a777360b5b39a1a2e5106f8e858f2fd2d060c570" integrity sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw== +"@remix-run/router@1.21.0": + version "1.21.0" + resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.21.0.tgz#c65ae4262bdcfe415dbd4f64ec87676e4a56e2b5" + integrity sha512-xfSkCAchbdG5PnbrKqFWwia4Bi61nH+wm8wLEqfHDyp7Y3dZzgqS2itV8i4gAq9pC2HsTpwyBC6Ds8VHZ96JlA== + "@rollup/plugin-babel@^5.2.0", "@rollup/plugin-babel@^5.3.1": version "5.3.1" resolved "https://registry.yarnpkg.com/@rollup/plugin-babel/-/plugin-babel-5.3.1.tgz#04bc0608f4aa4b2e4b1aebf284344d0f68fda283" @@ -2804,24 +2996,24 @@ resolved 
"https://registry.yarnpkg.com/@rushstack/eslint-patch/-/eslint-patch-1.10.4.tgz#427d5549943a9c6fce808e39ea64dbe60d4047f1" integrity sha512-WJgX9nzTqknM393q1QJDJmoW28kUfEnybeTfVNcNAPnIx210RXm2DiXiHzfNPJNIUUb1tJnz/l4QGtJ30PgWmA== -"@sinclair/typebox@^0.24.1": - version "0.24.51" - resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.24.51.tgz#645f33fe4e02defe26f2f5c0410e1c094eac7f5f" - integrity sha512-1P1OROm/rdubP5aFDSZQILU0vrLCJ4fvHt6EoqHEM+2D/G5MK3bIaymUKLit8Js9gbns5UyJnkP/TZROLw4tUA== +"@sinclair/typebox@^0.27.8": + version "0.27.8" + resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" + integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== -"@sinonjs/commons@^1.7.0": - version "1.8.6" - resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.6.tgz#80c516a4dc264c2a69115e7578d62581ff455ed9" - integrity sha512-Ky+XkAkqPZSm3NLBeUng77EBQl3cmeJhITaGHdYH8kjVB+aun3S4XBRti2zt17mtt0mIUDiNxYeoJm6drVvBJQ== +"@sinonjs/commons@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.1.tgz#1029357e44ca901a615585f6d27738dbc89084cd" + integrity sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ== dependencies: type-detect "4.0.8" -"@sinonjs/fake-timers@^8.0.1": - version "8.1.0" - resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-8.1.0.tgz#3fdc2b6cb58935b21bfb8d1625eb1300484316e7" - integrity sha512-OAPJUAtgeINhh/TAlUID4QTs53Njm7xzddaVlEs/SXwgtiD1tW22zAB/W1wdqfrpmikgaWQ9Fw6Ws+hsiRm5Vg== +"@sinonjs/fake-timers@^10.0.2": + version "10.3.0" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz#55fdff1ecab9f354019129daf4df0dd4d923ea66" + integrity sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA== dependencies: - "@sinonjs/commons" "^1.7.0" + 
"@sinonjs/commons" "^3.0.0" "@surma/rollup-plugin-off-main-thread@^2.2.3": version "2.2.3" @@ -2978,10 +3170,10 @@ resolved "https://registry.yarnpkg.com/@testing-library/user-event/-/user-event-14.5.2.tgz#db7257d727c891905947bd1c1a99da20e03c2ebd" integrity sha512-YAh82Wh4TIrxYLmfGcixwD18oIjyC1pFQC2Y01F2lzV2HTMiYrI0nze0FD0ocB//CKS/7jIUgae+adPqxK5yCQ== -"@tootallnate/once@1": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-1.1.2.tgz#ccb91445360179a04e7fe6aff78c00ffc1eeaf82" - integrity sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw== +"@tootallnate/once@2": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-2.0.0.tgz#f544a148d3ab35801c1f633a7441fd87c2e484bf" + integrity sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A== "@trysound/sax@0.2.0": version "0.2.0" @@ -3019,7 +3211,7 @@ "@babel/parser" "^7.1.0" "@babel/types" "^7.0.0" -"@types/babel__traverse@*", "@types/babel__traverse@^7.0.4", "@types/babel__traverse@^7.0.6": +"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": version "7.20.6" resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.6.tgz#8dc9f0ae0f202c08d8d4dab648912c8d6038e3f7" integrity sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg== @@ -3056,10 +3248,10 @@ dependencies: "@types/node" "*" -"@types/cookie@^0.4.1": - version "0.4.1" - resolved "https://registry.yarnpkg.com/@types/cookie/-/cookie-0.4.1.tgz#bfd02c1f2224567676c1545199f87c3a861d878d" - integrity sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q== +"@types/cookie@^0.6.0": + version "0.6.0" + resolved "https://registry.yarnpkg.com/@types/cookie/-/cookie-0.6.0.tgz#eac397f28bf1d6ae0ae081363eca2f425bedf0d5" + integrity 
sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA== "@types/eslint@^7.29.0 || ^8.4.1": version "8.56.12" @@ -3134,7 +3326,7 @@ "@types/minimatch" "*" "@types/node" "*" -"@types/graceful-fs@^4.1.2": +"@types/graceful-fs@^4.1.2", "@types/graceful-fs@^4.1.3": version "4.1.9" resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4" integrity sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ== @@ -3173,14 +3365,6 @@ dependencies: "@types/node" "*" -"@types/inquirer@^8.1.3": - version "8.2.10" - resolved "https://registry.yarnpkg.com/@types/inquirer/-/inquirer-8.2.10.tgz#9444dce2d764c35bc5bb4d742598aaa4acb6561b" - integrity sha512-IdD5NmHyVjWM8SHWo/kPBgtzXatwPkfwzyP3fN1jF2g9BWt5WO+8hL2F4o2GKIYsU40PpqeevuUWvkS/roXJkA== - dependencies: - "@types/through" "*" - rxjs "^7.2.0" - "@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": version "2.0.6" resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" @@ -3208,10 +3392,14 @@ jest-diff "^27.0.0" pretty-format "^27.0.0" -"@types/js-levenshtein@^1.1.0": - version "1.1.3" - resolved "https://registry.yarnpkg.com/@types/js-levenshtein/-/js-levenshtein-1.1.3.tgz#a6fd0bdc8255b274e5438e0bfb25f154492d1106" - integrity sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ== +"@types/jsdom@^20.0.0": + version "20.0.1" + resolved "https://registry.yarnpkg.com/@types/jsdom/-/jsdom-20.0.1.tgz#07c14bc19bd2f918c1929541cdaacae894744808" + integrity sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ== + dependencies: + "@types/node" "*" + "@types/tough-cookie" "*" + parse5 "^7.0.0" "@types/json-schema@*", "@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", 
"@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9": version "7.0.15" @@ -3282,12 +3470,12 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.18.tgz#633184f55c322e4fb08612307c274ee6d5ed3154" integrity sha512-m+6nTEOadJZuTPkKR/SYK3A2d7FZrgElol9UP1Kae90VVU4a6mxnPuLiIW1m4Cq4gZ/nWb9GrdVXJCoCazDAbg== -"@types/node@^20.16.13": - version "20.16.13" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.16.13.tgz#148c152d757dc73f8d65f0f6f078f39050b85b0c" - integrity sha512-GjQ7im10B0labo8ZGXDGROUl9k0BNyDgzfGpb4g/cl+4yYDWVKcozANF4FGr4/p0O/rAkQClM6Wiwkije++1Tg== +"@types/node@^22.12.0": + version "22.12.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.12.0.tgz#bf8af3b2af0837b5a62a368756ff2b705ae0048c" + integrity sha512-Fll2FZ1riMjNmlmJOdAyY5pUbkftXslB5DgEzlIuNaiWhXd00FhWxVC/r4yV/4wBb9JfImTu+jiSvXTkJ7F/gA== dependencies: - undici-types "~6.19.2" + undici-types "~6.20.0" "@types/numeral@^2.0.5": version "2.0.5" @@ -3304,11 +3492,6 @@ resolved "https://registry.yarnpkg.com/@types/parse5/-/parse5-5.0.3.tgz#e7b5aebbac150f8b5fdd4a46e7f0bd8e65e19109" integrity sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw== -"@types/prettier@^2.1.5": - version "2.7.3" - resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.3.tgz#3e51a17e291d01d17d3fc61422015a933af7a08f" - integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA== - "@types/prismjs@*": version "1.26.0" resolved "https://registry.yarnpkg.com/@types/prismjs/-/prismjs-1.26.0.tgz#a1c3809b0ad61c62cac6d4e0c56d610c910b7654" @@ -3413,13 +3596,6 @@ "@types/node" "*" "@types/send" "*" -"@types/set-cookie-parser@^2.4.0": - version "2.4.10" - resolved "https://registry.yarnpkg.com/@types/set-cookie-parser/-/set-cookie-parser-2.4.10.tgz#ad3a807d6d921db9720621ea3374c5d92020bcbc" - integrity sha512-GGmQVGpQWUe5qglJozEjZV/5dyxbOOZ0LHe/lqyWssB88Y4svNfst0uqBVscdDeIKl5Jy5+aPSvy7mI9tYRguw== - 
dependencies: - "@types/node" "*" - "@types/sockjs@^0.3.33": version "0.3.36" resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.36.tgz#ce322cf07bcc119d4cbf7f88954f3a3bd0f67535" @@ -3432,17 +3608,20 @@ resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw== +"@types/statuses@^2.0.4": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@types/statuses/-/statuses-2.0.5.tgz#f61ab46d5352fd73c863a1ea4e1cef3b0b51ae63" + integrity sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A== + "@types/stylis@4.2.5": version "4.2.5" resolved "https://registry.yarnpkg.com/@types/stylis/-/stylis-4.2.5.tgz#1daa6456f40959d06157698a653a9ab0a70281df" integrity sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw== -"@types/through@*": - version "0.0.33" - resolved "https://registry.yarnpkg.com/@types/through/-/through-0.0.33.tgz#14ebf599320e1c7851e7d598149af183c6b9ea56" - integrity sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ== - dependencies: - "@types/node" "*" +"@types/tough-cookie@*", "@types/tough-cookie@^4.0.5": + version "4.0.5" + resolved "https://registry.yarnpkg.com/@types/tough-cookie/-/tough-cookie-4.0.5.tgz#cb6e2a691b70cb177c6e3ae9c1d2e8b2ea8cd304" + integrity sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA== "@types/trusted-types@^2.0.2": version "2.0.7" @@ -3702,11 +3881,6 @@ "@webassemblyjs/ast" "1.12.1" "@xtuc/long" "4.2.2" -"@xmldom/xmldom@^0.7.2": - version "0.7.13" - resolved "https://registry.yarnpkg.com/@xmldom/xmldom/-/xmldom-0.7.13.tgz#ff34942667a4e19a9f4a0996a76814daac364cf3" - integrity sha512-lm2GW5PkosIzccsaZIz7tp8cPADSIlIHWDFTR1N0SzfinhhYgeIQjFMz4rYzanCScr3DqQLeomUDArp6MWKm+g== - 
"@xtuc/ieee754@^1.2.0": version "1.2.0" resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" @@ -3717,7 +3891,7 @@ resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== -abab@^2.0.3, abab@^2.0.5: +abab@^2.0.5, abab@^2.0.6: version "2.0.6" resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.6.tgz#41b80f2c871d19686216b82309231cfd3cb3d291" integrity sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA== @@ -3730,13 +3904,13 @@ accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.8: mime-types "~2.1.34" negotiator "0.6.3" -acorn-globals@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-6.0.0.tgz#46cdd39f0f8ff08a876619b55f5ac8a6dc770b45" - integrity sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg== +acorn-globals@^7.0.0: + version "7.0.1" + resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-7.0.1.tgz#0dbf05c44fa7c94332914c02066d5beff62c40c3" + integrity sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q== dependencies: - acorn "^7.1.1" - acorn-walk "^7.1.1" + acorn "^8.1.0" + acorn-walk "^8.0.2" acorn-import-attributes@^1.9.5: version "1.9.5" @@ -3748,17 +3922,19 @@ acorn-jsx@^5.3.2: resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== -acorn-walk@^7.1.1: - version "7.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc" - integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== 
+acorn-walk@^8.0.2: + version "8.3.4" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.4.tgz#794dd169c3977edf4ba4ea47583587c5866236b7" + integrity sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g== + dependencies: + acorn "^8.11.0" -acorn@^7.1.1: - version "7.4.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" - integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== +acorn@^8.1.0, acorn@^8.11.0, acorn@^8.8.1: + version "8.14.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" + integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== -acorn@^8.2.4, acorn@^8.7.1, acorn@^8.8.2, acorn@^8.9.0: +acorn@^8.7.1, acorn@^8.8.2, acorn@^8.9.0: version "8.13.0" resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.13.0.tgz#2a30d670818ad16ddd6a35d3842dacec9e5d7ca3" integrity sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w== @@ -3822,13 +3998,18 @@ ajv@^8.0.0, ajv@^8.6.0, ajv@^8.9.0: json-schema-traverse "^1.0.0" require-from-string "^2.0.2" -ansi-escapes@^4.2.1, ansi-escapes@^4.3.1: +ansi-escapes@^4.2.1, ansi-escapes@^4.3.2: version "4.3.2" resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== dependencies: type-fest "^0.21.3" +ansi-escapes@^6.0.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-6.2.1.tgz#76c54ce9b081dad39acec4b5d53377913825fb0f" + integrity sha512-4nJ3yixlEthEJ9Rk4vPcdBRkZvQZlYyu8j4/Mqz5sgIkddmEnH2Yj2ZrnP9S3tQOvSNRUIgVNF/1yPpRAGNRig== + ansi-html-community@^0.0.8: version "0.0.8" resolved 
"https://registry.yarnpkg.com/ansi-html-community/-/ansi-html-community-0.0.8.tgz#69fbc4d6ccbe383f9736934ae34c3f8290f1bf41" @@ -4072,7 +4253,7 @@ axobject-query@^4.1.0: resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-4.1.0.tgz#28768c76d0e3cff21bc62a9e2d0b6ac30042a1ee" integrity sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ== -babel-jest@^27.4.2, babel-jest@^27.5.1: +babel-jest@^27.4.2: version "27.5.1" resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-27.5.1.tgz#a1bf8d61928edfefd21da27eb86a695bfd691444" integrity sha512-cdQ5dXjGRd0IBRATiQ4mZGlGlRE8kJpjPOixdNRdT+m3UcNqmYWN6rK6nvtXYfY3D76cb8s/O1Ss8ea24PIwcg== @@ -4086,6 +4267,19 @@ babel-jest@^27.4.2, babel-jest@^27.5.1: graceful-fs "^4.2.9" slash "^3.0.0" +babel-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.7.0.tgz#f4369919225b684c56085998ac63dbd05be020d5" + integrity sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg== + dependencies: + "@jest/transform" "^29.7.0" + "@types/babel__core" "^7.1.14" + babel-plugin-istanbul "^6.1.1" + babel-preset-jest "^29.6.3" + chalk "^4.0.0" + graceful-fs "^4.2.9" + slash "^3.0.0" + babel-loader@^8.2.3: version "8.4.1" resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.4.1.tgz#6ccb75c66e62c3b144e1c5f2eaec5b8f6c08c675" @@ -4117,6 +4311,16 @@ babel-plugin-jest-hoist@^27.5.1: "@types/babel__core" "^7.0.0" "@types/babel__traverse" "^7.0.6" +babel-plugin-jest-hoist@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz#aadbe943464182a8922c3c927c3067ff40d24626" + integrity sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg== + dependencies: + "@babel/template" "^7.3.3" + "@babel/types" "^7.3.3" + "@types/babel__core" "^7.1.14" + "@types/babel__traverse" "^7.0.6" + 
babel-plugin-macros@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" @@ -4189,6 +4393,14 @@ babel-preset-jest@^27.5.1: babel-plugin-jest-hoist "^27.5.1" babel-preset-current-node-syntax "^1.0.0" +babel-preset-jest@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz#fa05fa510e7d493896d7b0dd2033601c840f171c" + integrity sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA== + dependencies: + babel-plugin-jest-hoist "^29.6.3" + babel-preset-current-node-syntax "^1.0.0" + babel-preset-react-app@^10.0.1: version "10.0.1" resolved "https://registry.yarnpkg.com/babel-preset-react-app/-/babel-preset-react-app-10.0.1.tgz#ed6005a20a24f2c88521809fa9aea99903751584" @@ -4221,11 +4433,6 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -base64-js@^1.3.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" - integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== - batch@0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" @@ -4257,15 +4464,6 @@ binary-extensions@^2.0.0: resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== -bl@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" - integrity 
sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== - dependencies: - buffer "^5.5.0" - inherits "^2.0.4" - readable-stream "^3.4.0" - bluebird@^3.7.2: version "3.7.2" resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" @@ -4338,11 +4536,6 @@ broadcast-channel@^3.4.1: rimraf "3.0.2" unload "2.2.0" -browser-process-hrtime@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626" - integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow== - browserslist@^4.0.0, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.21.4, browserslist@^4.23.3, browserslist@^4.24.0: version "4.24.0" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.0.tgz#a1325fe4bc80b64fda169629fc01b3d6cecd38d4" @@ -4365,14 +4558,6 @@ buffer-from@^1.0.0: resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== -buffer@^5.5.0: - version "5.7.1" - resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" - integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== - dependencies: - base64-js "^1.3.1" - ieee754 "^1.1.13" - builtin-modules@^3.1.0: version "3.3.0" resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-3.3.0.tgz#cae62812b89801e9656336e46223e030386be7b6" @@ -4459,14 +4644,6 @@ ccount@^1.0.0: resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.1.0.tgz#246687debb6014735131be8abab2d93898f8d043" integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg== -chalk@4.1.1: - version "4.1.1" - 
resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" - integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" @@ -4484,7 +4661,7 @@ chalk@^3.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" -chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.1, chalk@^4.1.2: +chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -4492,6 +4669,11 @@ chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.1, chalk@^4.1.2: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^5.2.0: + version "5.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.4.1.tgz#1b48bf0963ec158dce2aacf69c093ae2dd2092d8" + integrity sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w== + char-regex@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" @@ -4522,11 +4704,6 @@ character-reference-invalid@^1.0.0: resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560" integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg== -chardet@^0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" - integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== - check-types@^11.2.3: version "11.2.3" 
resolved "https://registry.yarnpkg.com/check-types/-/check-types-11.2.3.tgz#1ffdf68faae4e941fce252840b1787b8edc93b71" @@ -4579,31 +4756,10 @@ clean-css@^5.2.2: dependencies: source-map "~0.6.0" -cli-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" - integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== - dependencies: - restore-cursor "^3.1.0" - -cli-spinners@^2.5.0: - version "2.9.2" - resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.9.2.tgz#1773a8f4b9c4d6ac31563df53b3fc1d79462fe41" - integrity sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg== - -cli-width@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" - integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== - -cliui@^7.0.2: - version "7.0.4" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" - integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== - dependencies: - string-width "^4.2.0" - strip-ansi "^6.0.0" - wrap-ansi "^7.0.0" +cli-width@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-4.1.0.tgz#42daac41d3c254ef38ad8ac037672130173691c5" + integrity sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ== cliui@^8.0.1: version "8.0.1" @@ -4614,11 +4770,6 @@ cliui@^8.0.1: strip-ansi "^6.0.1" wrap-ansi "^7.0.0" -clone@^1.0.2: - version "1.0.4" - resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" - integrity sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg== - co@^4.6.0: version 
"4.6.0" resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" @@ -4757,7 +4908,7 @@ content-type@~1.0.4, content-type@~1.0.5: resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== -convert-source-map@^1.4.0, convert-source-map@^1.5.0, convert-source-map@^1.6.0, convert-source-map@^1.7.0: +convert-source-map@^1.4.0, convert-source-map@^1.5.0, convert-source-map@^1.7.0: version "1.9.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A== @@ -4777,10 +4928,10 @@ cookie@0.7.1: resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.7.1.tgz#2f73c42142d5d5cf71310a74fc4ae61670e5dbc9" integrity sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w== -cookie@^0.4.1: - version "0.4.2" - resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.2.tgz#0e41f24de5ecf317947c82fc789e06a884824432" - integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA== +cookie@^0.7.2: + version "0.7.2" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.7.2.tgz#556369c472a2ba910f2979891b526b3436237ed7" + integrity sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w== core-js-compat@^3.38.0, core-js-compat@^3.38.1: version "3.38.1" @@ -4836,6 +4987,19 @@ cosmiconfig@^8.1.3: parse-json "^5.2.0" path-type "^4.0.0" +create-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/create-jest/-/create-jest-29.7.0.tgz#a355c5b3cb1e1af02ba177fe7afd7feee49a5320" + integrity 
sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q== + dependencies: + "@jest/types" "^29.6.3" + chalk "^4.0.0" + exit "^0.1.2" + graceful-fs "^4.2.9" + jest-config "^29.7.0" + jest-util "^29.7.0" + prompts "^2.0.1" + cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" @@ -5050,10 +5214,10 @@ csso@^5.0.5: dependencies: css-tree "~2.2.0" -cssom@^0.4.4: - version "0.4.4" - resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.4.4.tgz#5a66cf93d2d0b661d80bf6a44fb65f5c2e4e0a10" - integrity sha512-p3pvU7r1MyyqbTk+WbNJIgJjG2VmTIaB10rI93LzVPrmDJKkzKYMtxxyAvQXR/NS6otuzveI7+7BBq3SjBS2mw== +cssom@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.5.0.tgz#d254fa92cd8b6fbd83811b9fbaed34663cc17c36" + integrity sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw== cssom@~0.3.6: version "0.3.8" @@ -5082,14 +5246,14 @@ damerau-levenshtein@^1.0.8: resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz#b43d286ccbd36bc5b2f7ed41caf2d0aba1f8a6e7" integrity sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA== -data-urls@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-2.0.0.tgz#156485a72963a970f5d5821aaf642bef2bf2db9b" - integrity sha512-X5eWTSXO/BJmpdIKCRuKUgSCgAN0OwliVK3yPKbwIWU1Tdw5BRajxlzMidvh+gwko9AfQ9zIj52pzF91Q3YAvQ== +data-urls@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-3.0.2.tgz#9cf24a477ae22bcef5cd5f6f0bfbc1d2d3be9143" + integrity sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ== dependencies: - abab "^2.0.3" - whatwg-mimetype "^2.3.0" - whatwg-url "^8.0.0" + abab "^2.0.6" + whatwg-mimetype "^3.0.0" + whatwg-url "^11.0.0" 
data-view-buffer@^1.0.1: version "1.0.1" @@ -5125,10 +5289,10 @@ debug@2.6.9, debug@^2.6.0: dependencies: ms "2.0.0" -debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: - version "4.3.7" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" - integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== +debug@4: + version "4.4.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" + integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== dependencies: ms "^2.1.3" @@ -5139,20 +5303,27 @@ debug@^3.2.7: dependencies: ms "^2.1.1" -decimal.js@^10.2.1: - version "10.4.3" - resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.4.3.tgz#1044092884d245d1b7f65725fa4ad4c6f781cc23" - integrity sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA== +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: + version "4.3.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + +decimal.js@^10.4.2: + version "10.5.0" + resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.5.0.tgz#0f371c7cf6c4898ce0afb09836db73cd82010f22" + integrity sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw== decode-uri-component@^0.2.0: version "0.2.2" resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.2.tgz#e69dbe25d37941171dd540e024c444cd5188e1e9" integrity sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ== -dedent@^0.7.0: - version "0.7.0" - resolved 
"https://registry.yarnpkg.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c" - integrity sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA== +dedent@^1.0.0: + version "1.5.3" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.3.tgz#99aee19eb9bae55a67327717b6e848d0bf777e5a" + integrity sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ== deep-equal@^2.0.5: version "2.2.3" @@ -5195,13 +5366,6 @@ default-gateway@^6.0.3: dependencies: execa "^5.0.0" -defaults@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a" - integrity sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A== - dependencies: - clone "^1.0.2" - define-data-property@^1.0.1, define-data-property@^1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" @@ -5273,11 +5437,16 @@ detect-port-alt@^1.1.6: address "^1.0.1" debug "^2.6.0" -diff-sequences@^27.4.0, diff-sequences@^27.5.1: +diff-sequences@^27.4.0: version "27.5.1" resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-27.5.1.tgz#eaecc0d327fd68c8d9672a1e64ab8dccb2ef5327" integrity sha512-k1gCAXAsNgLwEL+Y8Wvl+M6oEFj5bgazfZULpS5CneoPPXRaCCW7dm+q21Ky2VEE5X+VeRDBVg1Pcvvsr4TtNQ== +diff-sequences@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921" + integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q== + dir-glob@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" @@ -5346,12 +5515,12 @@ domelementtype@^2.0.1, domelementtype@^2.2.0, domelementtype@^2.3.0: resolved 
"https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== -domexception@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/domexception/-/domexception-2.0.1.tgz#fb44aefba793e1574b0af6aed2801d057529f304" - integrity sha512-yxJ2mFy/sibVQlu5qHjOkf9J3K6zgmCxgJ94u2EdvDOV09H+32LtRswEcUsmUWN72pVLOEnTSRaIVVzVQgS0dg== +domexception@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/domexception/-/domexception-4.0.0.tgz#4ad1be56ccadc86fc76d033353999a8037d03673" + integrity sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw== dependencies: - webidl-conversions "^5.0.0" + webidl-conversions "^7.0.0" domhandler@^4.0.0, domhandler@^4.2.0, domhandler@^4.3.1: version "4.3.1" @@ -5425,15 +5594,10 @@ electron-to-chromium@^1.5.28: resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.41.tgz#eae1ba6c49a1a61d84cf8263351d3513b2bcc534" integrity sha512-dfdv/2xNjX0P8Vzme4cfzHqnPm5xsZXwsolTYr0eyW18IUmNyG08vL+fttvinTfhKfIKdRoqkDIC9e9iWQCNYQ== -emittery@^0.10.2: - version "0.10.2" - resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.10.2.tgz#902eec8aedb8c41938c46e9385e9db7e03182933" - integrity sha512-aITqOwnLanpHLNXZJENbOgjUBeHocD+xsSJmNrjovKBW5HbSpW3d1pEls7GFQPUWXiwG9+0P4GtHfEqC/4M0Iw== - -emittery@^0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.8.1.tgz#bb23cc86d03b30aa75a7f734819dee2e1ba70860" - integrity sha512-uDfvUjVrfGJJhymx/kz6prltenw1u7WrCg1oa94zYY8xxVpLLUu045LAT0dhDZdXG58/EpPL/5kA180fQ/qudg== +emittery@^0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" + integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ== emoji-regex@^8.0.0: 
version "8.0.0" @@ -5478,7 +5642,7 @@ entities@^2.0.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@^4.2.0, entities@^4.4.0: +entities@^4.2.0, entities@^4.4.0, entities@^4.5.0: version "4.5.0" resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== @@ -5969,7 +6133,7 @@ eventemitter3@^4.0.0: resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== -events@^3.2.0, events@^3.3.0: +events@^3.2.0: version "3.3.0" resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== @@ -5994,15 +6158,16 @@ exit@^0.1.2: resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ== -expect@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/expect/-/expect-27.5.1.tgz#83ce59f1e5bdf5f9d2b94b61d2050db48f3fef74" - integrity sha512-E1q5hSUG2AmYQwQJ041nvgpkODHQvB+RKlB4IYdru6uJsyFTRyZAP463M+1lINorwbqAmUggi6+WwkD8lCS/Dw== +expect@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/expect/-/expect-29.7.0.tgz#578874590dcb3214514084c08115d8aee61e11bc" + integrity sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw== dependencies: - "@jest/types" "^27.5.1" - jest-get-type "^27.5.1" - jest-matcher-utils "^27.5.1" - 
jest-message-util "^27.5.1" + "@jest/expect-utils" "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" express@^4.17.3: version "4.21.1" @@ -6046,15 +6211,6 @@ extend@^3.0.0: resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== -external-editor@^3.0.3: - version "3.1.0" - resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" - integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== - dependencies: - chardet "^0.7.0" - iconv-lite "^0.4.24" - tmp "^0.0.33" - fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" @@ -6114,13 +6270,6 @@ fb-watchman@^2.0.0: dependencies: bser "2.1.1" -figures@^3.0.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" - integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== - dependencies: - escape-string-regexp "^1.0.5" - file-entry-cache@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" @@ -6269,10 +6418,10 @@ fork-ts-checker-webpack-plugin@^6.5.0: semver "^7.3.2" tapable "^1.0.0" -form-data@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.2.tgz#83ad9ced7c03feaad97e293d6f6091011e1659c8" - integrity sha512-sJe+TQb2vIaIyO783qN6BlMYWMw3WBOHA1Ay2qxsnjuafEOQFJ2JakedOQirT6D5XPRxDvS7AHYyem9fTpb4LQ== +form-data@^4.0.0: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/form-data/-/form-data-4.0.1.tgz#ba1076daaaa5bfd7e99c1a6cb02aa0a5cff90d48" + integrity sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" @@ -6430,7 +6579,7 @@ glob-to-regexp@^0.4.1: resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz#c75297087c851b9a578bd217dd59a92f59fe546e" integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== -glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: +glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: version "7.2.3" resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== @@ -6532,10 +6681,10 @@ graphemer@^1.4.0: resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag== -graphql@^15.5.1: - version "15.9.0" - resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.9.0.tgz#4e8ca830cfd30b03d44d3edd9cac2b0690304b53" - integrity sha512-GCOQdvm7XxV1S4U4CGrsdlEN37245eC8P9zaYCMr6K1BG0IPGy5lUwmJsEOGyl1GD6HXjOtl2keCP9asRBwNvA== +graphql@^16.8.1: + version "16.10.0" + resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.10.0.tgz#24c01ae0af6b11ea87bf55694429198aaa8e220c" + integrity sha512-AjqGKbDGUFRKIRCP9tCKiIGHyriz2oHEbPIbEtcSLSs4YjReZOIPQQWek4+6hjw62H9QShXHyaGivGiYVLeYFQ== gzip-size@^6.0.0: version "6.0.0" @@ -6549,6 +6698,14 @@ handle-thing@^2.0.0: resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.1.tgz#857f79ce359580c340d43081cc648970d0bb234e" integrity sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg== +happy-dom@^16.7.3: + version "16.7.3" + 
resolved "https://registry.yarnpkg.com/happy-dom/-/happy-dom-16.7.3.tgz#8f3033265c4e0d31bc7e68f8678676bb92f0e7f7" + integrity sha512-76uiE9jCpC849cOyYZ8YBROpPcstW/hwCKoQYd3aiZaxHeR9zdjpup4z7qYEWbt+lY8Rb3efW2gmrckyoBftKg== + dependencies: + webidl-conversions "^7.0.0" + whatwg-mimetype "^3.0.0" + harmony-reflect@^1.4.6: version "1.6.2" resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.2.tgz#31ecbd32e648a34d030d86adb67d4d47547fe710" @@ -6700,10 +6857,10 @@ he@^1.2.0: resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== -headers-utils@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/headers-utils/-/headers-utils-3.0.2.tgz#dfc65feae4b0e34357308aefbcafa99c895e59ef" - integrity sha512-xAxZkM1dRyGV2Ou5bzMxBPNLoRCjcX+ya7KSWybQD2KwLphxsapUVK6x/02o7f4VU6GPSXch9vNY2+gkU8tYWQ== +headers-polyfill@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/headers-polyfill/-/headers-polyfill-4.0.3.tgz#922a0155de30ecc1f785bcf04be77844ca95ad07" + integrity sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ== highlight.js@^10.4.1, highlight.js@~10.7.0: version "10.7.3" @@ -6715,13 +6872,6 @@ highlightjs-vue@^1.0.0: resolved "https://registry.yarnpkg.com/highlightjs-vue/-/highlightjs-vue-1.0.0.tgz#fdfe97fbea6354e70ee44e3a955875e114db086d" integrity sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA== -history@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/history/-/history-5.2.0.tgz#7cdd31cf9bac3c5d31f09c231c9928fad0007b7c" - integrity sha512-uPSF6lAJb3nSePJ43hN3eKj1dTWpN9gMod0ZssbFTIsen+WehTmEadgL+kg78xLJFdRfrrC//SavDzmRVdE+Ig== - dependencies: - "@babel/runtime" "^7.7.6" - hoist-non-react-statics@^3.3.0, hoist-non-react-statics@^3.3.1, hoist-non-react-statics@^3.3.2: version 
"3.3.2" resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" @@ -6744,12 +6894,12 @@ hpack.js@^2.1.6: readable-stream "^2.0.1" wbuf "^1.1.0" -html-encoding-sniffer@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-2.0.1.tgz#42a6dc4fd33f00281176e8b23759ca4e4fa185f3" - integrity sha512-D5JbOMBIR/TVZkubHT+OyT2705QvogUW4IBn6nHd756OwieSF9aDYFj4dv6HHEVGYbHaLETa3WggZYWWMyy3ZQ== +html-encoding-sniffer@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz#2cb1a8cf0db52414776e5b2a7a04d5dd98158de9" + integrity sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA== dependencies: - whatwg-encoding "^1.0.5" + whatwg-encoding "^2.0.0" html-entities@^2.1.0, html-entities@^2.3.2: version "2.5.2" @@ -6831,12 +6981,12 @@ http-parser-js@>=0.5.1: resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.8.tgz#af23090d9ac4e24573de6f6aecc9d84a48bf20e3" integrity sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q== -http-proxy-agent@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz#8a8c8ef7f5932ccf953c296ca8291b95aa74aa3a" - integrity sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg== +http-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz#5129800203520d434f142bc78ff3c170800f2b43" + integrity sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w== dependencies: - "@tootallnate/once" "1" + "@tootallnate/once" "2" agent-base "6" debug "4" @@ -6860,7 +7010,7 @@ http-proxy@^1.18.1: follow-redirects "^1.0.0" requires-port "^1.0.0" -https-proxy-agent@^5.0.0: 
+https-proxy-agent@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6" integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA== @@ -6873,14 +7023,14 @@ human-signals@^2.1.0: resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== -iconv-lite@0.4.24, iconv-lite@^0.4.24: +iconv-lite@0.4.24: version "0.4.24" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== dependencies: safer-buffer ">= 2.1.2 < 3" -iconv-lite@^0.6.3: +iconv-lite@0.6.3, iconv-lite@^0.6.3: version "0.6.3" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== @@ -6904,11 +7054,6 @@ identity-obj-proxy@^3.0.0: dependencies: harmony-reflect "^1.4.6" -ieee754@^1.1.13: - version "1.2.1" - resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" - integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== - ignore@^5.1.1, ignore@^5.2.0: version "5.3.2" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" @@ -6953,7 +7098,7 @@ inflight@^1.0.4: once "^1.3.0" wrappy "1" -inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.3: +inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: version "2.0.4" resolved 
"https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== @@ -6973,27 +7118,6 @@ inline-style-parser@0.1.1: resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== -inquirer@^8.2.0: - version "8.2.6" - resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-8.2.6.tgz#733b74888195d8d400a67ac332011b5fae5ea562" - integrity sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg== - dependencies: - ansi-escapes "^4.2.1" - chalk "^4.1.1" - cli-cursor "^3.1.0" - cli-width "^3.0.0" - external-editor "^3.0.3" - figures "^3.0.0" - lodash "^4.17.21" - mute-stream "0.0.8" - ora "^5.4.1" - run-async "^2.4.0" - rxjs "^7.5.5" - string-width "^4.1.0" - strip-ansi "^6.0.0" - through "^2.3.6" - wrap-ansi "^6.0.1" - inter-ui@^3.19.3: version "3.19.3" resolved "https://registry.yarnpkg.com/inter-ui/-/inter-ui-3.19.3.tgz#cf4b4b6d30de8d5463e2462588654b325206488c" @@ -7170,11 +7294,6 @@ is-hexadecimal@^1.0.0: resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7" integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw== -is-interactive@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" - integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== - is-map@^2.0.2, is-map@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e" @@ -7190,7 +7309,7 @@ is-negative-zero@^2.0.3: resolved 
"https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747" integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw== -is-node-process@^1.0.1: +is-node-process@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/is-node-process/-/is-node-process-1.2.0.tgz#ea02a1b90ddb3934a19aea414e88edef7e11d134" integrity sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw== @@ -7310,11 +7429,6 @@ is-typedarray@^1.0.0: resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== -is-unicode-supported@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" - integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== - is-weakmap@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd" @@ -7372,7 +7486,7 @@ istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756" integrity sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg== -istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0: +istanbul-lib-instrument@^5.0.4: version "5.2.1" resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz#d10c8885c2125574e1c231cacadf955675e1ce3d" integrity sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg== @@ -7383,6 +7497,17 @@ istanbul-lib-instrument@^5.0.4, 
istanbul-lib-instrument@^5.1.0: istanbul-lib-coverage "^3.2.0" semver "^6.3.0" +istanbul-lib-instrument@^6.0.0: + version "6.0.3" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz#fa15401df6c15874bcb2105f773325d78c666765" + integrity sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q== + dependencies: + "@babel/core" "^7.23.9" + "@babel/parser" "^7.23.9" + "@istanbuljs/schema" "^0.1.3" + istanbul-lib-coverage "^3.2.0" + semver "^7.5.4" + istanbul-lib-report@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz#908305bac9a5bd175ac6a74489eafd0fc2445a7d" @@ -7430,85 +7555,83 @@ jake@^10.8.5: filelist "^1.0.4" minimatch "^3.1.2" -jest-changed-files@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-27.5.1.tgz#a348aed00ec9bf671cc58a66fcbe7c3dfd6a68f5" - integrity sha512-buBLMiByfWGCoMsLLzGUUSpAmIAGnbR2KJoMN10ziLhOLvP4e0SlypHnAel8iqQXTrcbmfEY9sSqae5sgUsTvw== +jest-changed-files@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" + integrity sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w== dependencies: - "@jest/types" "^27.5.1" execa "^5.0.0" - throat "^6.0.1" - -jest-circus@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-27.5.1.tgz#37a5a4459b7bf4406e53d637b49d22c65d125ecc" - integrity sha512-D95R7x5UtlMA5iBYsOHFFbMD/GVA4R/Kdq15f7xYWUfWHBto9NYRsOvnSauTgdF+ogCpJ4tyKOXhUifxS65gdw== - dependencies: - "@jest/environment" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/types" "^27.5.1" + jest-util "^29.7.0" + p-limit "^3.1.0" + +jest-circus@^29.7.0: + version "29.7.0" + resolved 
"https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.7.0.tgz#b6817a45fcc835d8b16d5962d0c026473ee3668a" + integrity sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" chalk "^4.0.0" co "^4.6.0" - dedent "^0.7.0" - expect "^27.5.1" + dedent "^1.0.0" is-generator-fn "^2.0.0" - jest-each "^27.5.1" - jest-matcher-utils "^27.5.1" - jest-message-util "^27.5.1" - jest-runtime "^27.5.1" - jest-snapshot "^27.5.1" - jest-util "^27.5.1" - pretty-format "^27.5.1" + jest-each "^29.7.0" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + p-limit "^3.1.0" + pretty-format "^29.7.0" + pure-rand "^6.0.0" slash "^3.0.0" stack-utils "^2.0.3" - throat "^6.0.1" -jest-cli@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-27.5.1.tgz#278794a6e6458ea8029547e6c6cbf673bd30b145" - integrity sha512-Hc6HOOwYq4/74/c62dEE3r5elx8wjYqxY0r0G/nFrLDPMFRu6RA/u8qINOIkvhxG7mMQ5EJsOGfRpI8L6eFUVw== +jest-cli@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.7.0.tgz#5592c940798e0cae677eec169264f2d839a37995" + integrity sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg== dependencies: - "@jest/core" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/core" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" chalk "^4.0.0" + create-jest "^29.7.0" exit "^0.1.2" - graceful-fs "^4.2.9" import-local "^3.0.2" - jest-config "^27.5.1" - jest-util "^27.5.1" - jest-validate "^27.5.1" - prompts "^2.0.1" - yargs "^16.2.0" - -jest-config@^27.5.1: - version "27.5.1" - resolved 
"https://registry.yarnpkg.com/jest-config/-/jest-config-27.5.1.tgz#5c387de33dca3f99ad6357ddeccd91bf3a0e4a41" - integrity sha512-5sAsjm6tGdsVbW9ahcChPAFCk4IlkQUknH5AvKjuLTSlcO/wCZKyFdn7Rg0EkC+OGgWODEy2hDpWB1PgzH0JNA== - dependencies: - "@babel/core" "^7.8.0" - "@jest/test-sequencer" "^27.5.1" - "@jest/types" "^27.5.1" - babel-jest "^27.5.1" + jest-config "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + yargs "^17.3.1" + +jest-config@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.7.0.tgz#bcbda8806dbcc01b1e316a46bb74085a84b0245f" + integrity sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ== + dependencies: + "@babel/core" "^7.11.6" + "@jest/test-sequencer" "^29.7.0" + "@jest/types" "^29.6.3" + babel-jest "^29.7.0" chalk "^4.0.0" ci-info "^3.2.0" deepmerge "^4.2.2" - glob "^7.1.1" + glob "^7.1.3" graceful-fs "^4.2.9" - jest-circus "^27.5.1" - jest-environment-jsdom "^27.5.1" - jest-environment-node "^27.5.1" - jest-get-type "^27.5.1" - jest-jasmine2 "^27.5.1" - jest-regex-util "^27.5.1" - jest-resolve "^27.5.1" - jest-runner "^27.5.1" - jest-util "^27.5.1" - jest-validate "^27.5.1" + jest-circus "^29.7.0" + jest-environment-node "^29.7.0" + jest-get-type "^29.6.3" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-runner "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" micromatch "^4.0.4" parse-json "^5.2.0" - pretty-format "^27.5.1" + pretty-format "^29.7.0" slash "^3.0.0" strip-json-comments "^3.1.1" @@ -7522,64 +7645,70 @@ jest-diff@^27.0.0: jest-get-type "^27.4.0" pretty-format "^27.4.6" -jest-diff@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-27.5.1.tgz#a07f5011ac9e6643cf8a95a462b7b1ecf6680def" - integrity sha512-m0NvkX55LDt9T4mctTEgnZk3fmEg3NRYutvMPWM/0iPnkFj2wIeF45O1718cMSOFO1vINkqmxqD8vE37uTEbqw== +jest-diff@^29.7.0: + version "29.7.0" + resolved 
"https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.7.0.tgz#017934a66ebb7ecf6f205e84699be10afd70458a" + integrity sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw== dependencies: chalk "^4.0.0" - diff-sequences "^27.5.1" - jest-get-type "^27.5.1" - pretty-format "^27.5.1" + diff-sequences "^29.6.3" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" -jest-docblock@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-27.5.1.tgz#14092f364a42c6108d42c33c8cf30e058e25f6c0" - integrity sha512-rl7hlABeTsRYxKiUfpHrQrG4e2obOiTQWfMEH3PxPjOtdsfLQO4ReWSZaQ7DETm4xu07rl4q/h4zcKXyU0/OzQ== +jest-docblock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.7.0.tgz#8fddb6adc3cdc955c93e2a87f61cfd350d5d119a" + integrity sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g== dependencies: detect-newline "^3.0.0" -jest-each@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-27.5.1.tgz#5bc87016f45ed9507fed6e4702a5b468a5b2c44e" - integrity sha512-1Ff6p+FbhT/bXQnEouYy00bkNSY7OUpfIcmdl8vZ31A1UUaurOLPA8a8BbJOF2RDUElwJhmeaV7LnagI+5UwNQ== +jest-each@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.7.0.tgz#162a9b3f2328bdd991beaabffbb74745e56577d1" + integrity sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ== dependencies: - "@jest/types" "^27.5.1" + "@jest/types" "^29.6.3" chalk "^4.0.0" - jest-get-type "^27.5.1" - jest-util "^27.5.1" - pretty-format "^27.5.1" - -jest-environment-jsdom@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-27.5.1.tgz#ea9ccd1fc610209655a77898f86b2b559516a546" - integrity sha512-TFBvkTC1Hnnnrka/fUb56atfDtJ9VMZ94JkjTbggl1PEpwrYtUBKMezB3inLmWqQsXYLcMwNoDQwoBTAvFfsfw== - dependencies: - 
"@jest/environment" "^27.5.1" - "@jest/fake-timers" "^27.5.1" - "@jest/types" "^27.5.1" + jest-get-type "^29.6.3" + jest-util "^29.7.0" + pretty-format "^29.7.0" + +jest-environment-jsdom@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz#d206fa3551933c3fd519e5dfdb58a0f5139a837f" + integrity sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/jsdom" "^20.0.0" "@types/node" "*" - jest-mock "^27.5.1" - jest-util "^27.5.1" - jsdom "^16.6.0" - -jest-environment-node@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-27.5.1.tgz#dedc2cfe52fab6b8f5714b4808aefa85357a365e" - integrity sha512-Jt4ZUnxdOsTGwSRAfKEnE6BcwsSPNOijjwifq5sDFSA2kesnXTvNqKHYgM0hDq3549Uf/KzdXNYn4wMZJPlFLw== - dependencies: - "@jest/environment" "^27.5.1" - "@jest/fake-timers" "^27.5.1" - "@jest/types" "^27.5.1" + jest-mock "^29.7.0" + jest-util "^29.7.0" + jsdom "^20.0.0" + +jest-environment-node@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" + integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" - jest-mock "^27.5.1" - jest-util "^27.5.1" + jest-mock "^29.7.0" + jest-util "^29.7.0" -jest-get-type@^27.4.0, jest-get-type@^27.5.1: +jest-get-type@^27.4.0: version "27.5.1" resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-27.5.1.tgz#3cd613c507b0f7ace013df407a1c1cd578bcb4f1" integrity sha512-2KY95ksYSaK7DMBWQn6dQz3kqAf3BB64y2udeG+hv4KfSOb9qwcYQstTJc1KCbsix+wLZWZYN8t7nwX3GOBLRw== 
+jest-get-type@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.6.3.tgz#36f499fdcea197c1045a127319c0481723908fd1" + integrity sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw== + jest-haste-map@^27.5.1: version "27.5.1" resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-27.5.1.tgz#9fd8bd7e7b4fa502d9c6164c5640512b4e811e7f" @@ -7600,84 +7729,66 @@ jest-haste-map@^27.5.1: optionalDependencies: fsevents "^2.3.2" -jest-jasmine2@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-27.5.1.tgz#a037b0034ef49a9f3d71c4375a796f3b230d1ac4" - integrity sha512-jtq7VVyG8SqAorDpApwiJJImd0V2wv1xzdheGHRGyuT7gZm6gG47QEskOlzsN1PG/6WNaCo5pmwMHDf3AkG2pQ== +jest-haste-map@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.7.0.tgz#3c2396524482f5a0506376e6c858c3bbcc17b104" + integrity sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA== dependencies: - "@jest/environment" "^27.5.1" - "@jest/source-map" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/types" "^29.6.3" + "@types/graceful-fs" "^4.1.3" "@types/node" "*" - chalk "^4.0.0" - co "^4.6.0" - expect "^27.5.1" - is-generator-fn "^2.0.0" - jest-each "^27.5.1" - jest-matcher-utils "^27.5.1" - jest-message-util "^27.5.1" - jest-runtime "^27.5.1" - jest-snapshot "^27.5.1" - jest-util "^27.5.1" - pretty-format "^27.5.1" - throat "^6.0.1" - -jest-leak-detector@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-27.5.1.tgz#6ec9d54c3579dd6e3e66d70e3498adf80fde3fb8" - integrity sha512-POXfWAMvfU6WMUXftV4HolnJfnPOGEu10fscNCA76KBpRRhcMN2c8d3iT2pxQS3HLbA+5X4sOUPzYO2NUyIlHQ== - dependencies: - jest-get-type "^27.5.1" - pretty-format "^27.5.1" + anymatch "^3.0.3" + fb-watchman "^2.0.0" + graceful-fs "^4.2.9" + 
jest-regex-util "^29.6.3" + jest-util "^29.7.0" + jest-worker "^29.7.0" + micromatch "^4.0.4" + walker "^1.0.8" + optionalDependencies: + fsevents "^2.3.2" -jest-matcher-utils@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-27.5.1.tgz#9c0cdbda8245bc22d2331729d1091308b40cf8ab" - integrity sha512-z2uTx/T6LBaCoNWNFWwChLBKYxTMcGBRjAt+2SbP929/Fflb9aa5LGma654Rz8z9HLxsrUaYzxE9T/EFIL/PAw== +jest-leak-detector@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz#5b7ec0dadfdfec0ca383dc9aa016d36b5ea4c728" + integrity sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw== dependencies: - chalk "^4.0.0" - jest-diff "^27.5.1" - jest-get-type "^27.5.1" - pretty-format "^27.5.1" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" -jest-message-util@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-27.5.1.tgz#bdda72806da10d9ed6425e12afff38cd1458b6cf" - integrity sha512-rMyFe1+jnyAAf+NHwTclDz0eAaLkVDdKVHHBFWsBWHnnh5YeJMNWWsv7AbFYXfK3oTqvL7VTWkhNLu1jX24D+g== +jest-matcher-utils@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz#ae8fec79ff249fd592ce80e3ee474e83a6c44f12" + integrity sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g== dependencies: - "@babel/code-frame" "^7.12.13" - "@jest/types" "^27.5.1" - "@types/stack-utils" "^2.0.0" chalk "^4.0.0" - graceful-fs "^4.2.9" - micromatch "^4.0.4" - pretty-format "^27.5.1" - slash "^3.0.0" - stack-utils "^2.0.3" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" -jest-message-util@^28.1.3: - version "28.1.3" - resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-28.1.3.tgz#232def7f2e333f1eecc90649b5b94b0055e7c43d" - integrity 
sha512-PFdn9Iewbt575zKPf1286Ht9EPoJmYT7P0kY+RibeYZ2XtOr53pDLEFoTWXbd1h4JiGiWpTBC84fc8xMXQMb7g== +jest-message-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.7.0.tgz#8bc392e204e95dfe7564abbe72a404e28e51f7f3" + integrity sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w== dependencies: "@babel/code-frame" "^7.12.13" - "@jest/types" "^28.1.3" + "@jest/types" "^29.6.3" "@types/stack-utils" "^2.0.0" chalk "^4.0.0" graceful-fs "^4.2.9" micromatch "^4.0.4" - pretty-format "^28.1.3" + pretty-format "^29.7.0" slash "^3.0.0" stack-utils "^2.0.3" -jest-mock@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-27.5.1.tgz#19948336d49ef4d9c52021d34ac7b5f36ff967d6" - integrity sha512-K4jKbY1d4ENhbrG2zuPWaQBvDly+iZ2yAW+T1fATN78hc0sInwn7wZB8XtlNnvHug5RMwV897Xm4LqmPM4e2Og== +jest-mock@^29.4.0, jest-mock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.7.0.tgz#4e836cf60e99c6fcfabe9f99d017f3fdd50a6347" + integrity sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw== dependencies: - "@jest/types" "^27.5.1" + "@jest/types" "^29.6.3" "@types/node" "*" + jest-util "^29.7.0" jest-pnp-resolver@^1.2.2: version "1.2.3" @@ -7689,88 +7800,86 @@ jest-regex-util@^27.5.1: resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-27.5.1.tgz#4da143f7e9fd1e542d4aa69617b38e4a78365b95" integrity sha512-4bfKq2zie+x16okqDXjXn9ql2B0dScQu+vcwe4TvFVhkVyuWLqpZrZtXxLLWoXYgn0E87I6r6GRYHF7wFZBUvg== -jest-regex-util@^28.0.0: - version "28.0.2" - resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-28.0.2.tgz#afdc377a3b25fb6e80825adcf76c854e5bf47ead" - integrity sha512-4s0IgyNIy0y9FK+cjoVYoxamT7Zeo7MhzqRGx7YDYmaQn1wucY9rotiGkBzzcMXTtjrCAP/f7f+E0F7+fxPNdw== +jest-regex-util@^29.0.0, jest-regex-util@^29.6.3: + version "29.6.3" + resolved 
"https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.6.3.tgz#4a556d9c776af68e1c5f48194f4d0327d24e8a52" + integrity sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg== -jest-resolve-dependencies@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-27.5.1.tgz#d811ecc8305e731cc86dd79741ee98fed06f1da8" - integrity sha512-QQOOdY4PE39iawDn5rzbIePNigfe5B9Z91GDD1ae/xNDlu9kaat8QQ5EKnNmVWPV54hUdxCVwwj6YMgR2O7IOg== +jest-resolve-dependencies@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz#1b04f2c095f37fc776ff40803dc92921b1e88428" + integrity sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA== dependencies: - "@jest/types" "^27.5.1" - jest-regex-util "^27.5.1" - jest-snapshot "^27.5.1" + jest-regex-util "^29.6.3" + jest-snapshot "^29.7.0" -jest-resolve@^27.4.2, jest-resolve@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-27.5.1.tgz#a2f1c5a0796ec18fe9eb1536ac3814c23617b384" - integrity sha512-FFDy8/9E6CV83IMbDpcjOhumAQPDyETnU2KZ1O98DwTnz8AOBsW/Xv3GySr1mOZdItLR+zDZ7I/UdTFbgSOVCw== +jest-resolve@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.7.0.tgz#64d6a8992dd26f635ab0c01e5eef4399c6bcbc30" + integrity sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA== dependencies: - "@jest/types" "^27.5.1" chalk "^4.0.0" graceful-fs "^4.2.9" - jest-haste-map "^27.5.1" + jest-haste-map "^29.7.0" jest-pnp-resolver "^1.2.2" - jest-util "^27.5.1" - jest-validate "^27.5.1" + jest-util "^29.7.0" + jest-validate "^29.7.0" resolve "^1.20.0" - resolve.exports "^1.1.0" + resolve.exports "^2.0.0" slash "^3.0.0" -jest-runner@^27.5.1: - version "27.5.1" - resolved 
"https://registry.yarnpkg.com/jest-runner/-/jest-runner-27.5.1.tgz#071b27c1fa30d90540805c5645a0ec167c7b62e5" - integrity sha512-g4NPsM4mFCOwFKXO4p/H/kWGdJp9V8kURY2lX8Me2drgXqG7rrZAx5kv+5H7wtt/cdFIjhqYx1HrlqWHaOvDaQ== +jest-runner@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.7.0.tgz#809af072d408a53dcfd2e849a4c976d3132f718e" + integrity sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ== dependencies: - "@jest/console" "^27.5.1" - "@jest/environment" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/transform" "^27.5.1" - "@jest/types" "^27.5.1" + "@jest/console" "^29.7.0" + "@jest/environment" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" chalk "^4.0.0" - emittery "^0.8.1" + emittery "^0.13.1" graceful-fs "^4.2.9" - jest-docblock "^27.5.1" - jest-environment-jsdom "^27.5.1" - jest-environment-node "^27.5.1" - jest-haste-map "^27.5.1" - jest-leak-detector "^27.5.1" - jest-message-util "^27.5.1" - jest-resolve "^27.5.1" - jest-runtime "^27.5.1" - jest-util "^27.5.1" - jest-worker "^27.5.1" - source-map-support "^0.5.6" - throat "^6.0.1" - -jest-runtime@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-27.5.1.tgz#4896003d7a334f7e8e4a53ba93fb9bcd3db0a1af" - integrity sha512-o7gxw3Gf+H2IGt8fv0RiyE1+r83FJBRruoA+FXrlHw6xEyBsU8ugA6IPfTdVyA0w8HClpbK+DGJxH59UrNMx8A== - dependencies: - "@jest/environment" "^27.5.1" - "@jest/fake-timers" "^27.5.1" - "@jest/globals" "^27.5.1" - "@jest/source-map" "^27.5.1" - "@jest/test-result" "^27.5.1" - "@jest/transform" "^27.5.1" - "@jest/types" "^27.5.1" + jest-docblock "^29.7.0" + jest-environment-node "^29.7.0" + jest-haste-map "^29.7.0" + jest-leak-detector "^29.7.0" + jest-message-util "^29.7.0" + jest-resolve "^29.7.0" + jest-runtime "^29.7.0" + jest-util "^29.7.0" + jest-watcher "^29.7.0" + jest-worker "^29.7.0" + p-limit 
"^3.1.0" + source-map-support "0.5.13" + +jest-runtime@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.7.0.tgz#efecb3141cf7d3767a3a0cc8f7c9990587d3d817" + integrity sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/globals" "^29.7.0" + "@jest/source-map" "^29.6.3" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" chalk "^4.0.0" cjs-module-lexer "^1.0.0" collect-v8-coverage "^1.0.0" - execa "^5.0.0" glob "^7.1.3" graceful-fs "^4.2.9" - jest-haste-map "^27.5.1" - jest-message-util "^27.5.1" - jest-mock "^27.5.1" - jest-regex-util "^27.5.1" - jest-resolve "^27.5.1" - jest-snapshot "^27.5.1" - jest-util "^27.5.1" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" slash "^3.0.0" strip-bom "^4.0.0" @@ -7782,33 +7891,31 @@ jest-serializer@^27.5.1: "@types/node" "*" graceful-fs "^4.2.9" -jest-snapshot@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-27.5.1.tgz#b668d50d23d38054a51b42c4039cab59ae6eb6a1" - integrity sha512-yYykXI5a0I31xX67mgeLw1DZ0bJB+gpq5IpSuCAoyDi0+BhgU/RIrL+RTzDmkNTchvDFWKP8lp+w/42Z3us5sA== +jest-snapshot@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.7.0.tgz#c2c574c3f51865da1bb329036778a69bf88a6be5" + integrity sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw== dependencies: - "@babel/core" "^7.7.2" + "@babel/core" "^7.11.6" "@babel/generator" "^7.7.2" + "@babel/plugin-syntax-jsx" "^7.7.2" "@babel/plugin-syntax-typescript" "^7.7.2" - "@babel/traverse" "^7.7.2" - "@babel/types" "^7.0.0" - "@jest/transform" "^27.5.1" - "@jest/types" "^27.5.1" 
- "@types/babel__traverse" "^7.0.4" - "@types/prettier" "^2.1.5" + "@babel/types" "^7.3.3" + "@jest/expect-utils" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" babel-preset-current-node-syntax "^1.0.0" chalk "^4.0.0" - expect "^27.5.1" + expect "^29.7.0" graceful-fs "^4.2.9" - jest-diff "^27.5.1" - jest-get-type "^27.5.1" - jest-haste-map "^27.5.1" - jest-matcher-utils "^27.5.1" - jest-message-util "^27.5.1" - jest-util "^27.5.1" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" natural-compare "^1.4.0" - pretty-format "^27.5.1" - semver "^7.3.2" + pretty-format "^29.7.0" + semver "^7.5.3" jest-util@^27.5.1: version "27.5.1" @@ -7822,68 +7929,55 @@ jest-util@^27.5.1: graceful-fs "^4.2.9" picomatch "^2.2.3" -jest-util@^28.1.3: - version "28.1.3" - resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-28.1.3.tgz#f4f932aa0074f0679943220ff9cbba7e497028b0" - integrity sha512-XdqfpHwpcSRko/C35uLYFM2emRAltIIKZiJ9eAmhjsj0CqZMa0p1ib0R5fWIqGhn1a103DebTbpqIaP1qCQ6tQ== +jest-util@^29.4.0, jest-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.7.0.tgz#23c2b62bfb22be82b44de98055802ff3710fc0bc" + integrity sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA== dependencies: - "@jest/types" "^28.1.3" + "@jest/types" "^29.6.3" "@types/node" "*" chalk "^4.0.0" ci-info "^3.2.0" graceful-fs "^4.2.9" picomatch "^2.2.3" -jest-validate@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-27.5.1.tgz#9197d54dc0bdb52260b8db40b46ae668e04df067" - integrity sha512-thkNli0LYTmOI1tDB3FI1S1RTp/Bqyd9pTarJwL87OIBFuqEb5Apv5EaApEudYg4g86e3CT6kM0RowkhtEnCBQ== +jest-validate@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.7.0.tgz#7bf705511c64da591d46b15fce41400d52147d9c" + integrity 
sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw== dependencies: - "@jest/types" "^27.5.1" + "@jest/types" "^29.6.3" camelcase "^6.2.0" chalk "^4.0.0" - jest-get-type "^27.5.1" + jest-get-type "^29.6.3" leven "^3.1.0" - pretty-format "^27.5.1" + pretty-format "^29.7.0" -jest-watch-typeahead@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-1.1.0.tgz#b4a6826dfb9c9420da2f7bc900de59dad11266a9" - integrity sha512-Va5nLSJTN7YFtC2jd+7wsoe1pNe5K4ShLux/E5iHEwlB9AxaxmggY7to9KUqKojhaJw3aXqt5WAb4jGPOolpEw== - dependencies: - ansi-escapes "^4.3.1" - chalk "^4.0.0" - jest-regex-util "^28.0.0" - jest-watcher "^28.0.0" - slash "^4.0.0" +jest-watch-typeahead@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-2.2.2.tgz#5516d3cd006485caa5cfc9bd1de40f1f8b136abf" + integrity sha512-+QgOFW4o5Xlgd6jGS5X37i08tuuXNW8X0CV9WNFi+3n8ExCIP+E1melYhvYLjv5fE6D0yyzk74vsSO8I6GqtvQ== + dependencies: + ansi-escapes "^6.0.0" + chalk "^5.2.0" + jest-regex-util "^29.0.0" + jest-watcher "^29.0.0" + slash "^5.0.0" string-length "^5.0.1" strip-ansi "^7.0.1" -jest-watcher@^27.5.1: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-27.5.1.tgz#71bd85fb9bde3a2c2ec4dc353437971c43c642a2" - integrity sha512-z676SuD6Z8o8qbmEGhoEUFOM1+jfEiL3DXHK/xgEiG2EyNYfFG60jluWcupY6dATjfEsKQuibReS1djInQnoVw== - dependencies: - "@jest/test-result" "^27.5.1" - "@jest/types" "^27.5.1" - "@types/node" "*" - ansi-escapes "^4.2.1" - chalk "^4.0.0" - jest-util "^27.5.1" - string-length "^4.0.1" - -jest-watcher@^28.0.0: - version "28.1.3" - resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-28.1.3.tgz#c6023a59ba2255e3b4c57179fc94164b3e73abd4" - integrity sha512-t4qcqj9hze+jviFPUN3YAtAEeFnr/azITXQEMARf5cMwKY2SMBRnCQTXLixTl20OR6mLh9KLMrgVJgJISym+1g== +jest-watcher@^29.0.0, jest-watcher@^29.7.0: + version "29.7.0" + 
resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.7.0.tgz#7810d30d619c3a62093223ce6bb359ca1b28a2f2" + integrity sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g== dependencies: - "@jest/test-result" "^28.1.3" - "@jest/types" "^28.1.3" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" "@types/node" "*" ansi-escapes "^4.2.1" chalk "^4.0.0" - emittery "^0.10.2" - jest-util "^28.1.3" + emittery "^0.13.1" + jest-util "^29.7.0" string-length "^4.0.1" jest-worker@^26.2.1: @@ -7913,19 +8007,25 @@ jest-worker@^28.0.2: merge-stream "^2.0.0" supports-color "^8.0.0" -jest@^27.4.3: - version "27.5.1" - resolved "https://registry.yarnpkg.com/jest/-/jest-27.5.1.tgz#dadf33ba70a779be7a6fc33015843b51494f63fc" - integrity sha512-Yn0mADZB89zTtjkPJEXwrac3LHudkQMR+Paqa8uxJHCBr9agxztUifWCyiYrjhMPBoUVBjyny0I7XH6ozDr7QQ== +jest-worker@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.7.0.tgz#acad073acbbaeb7262bd5389e1bcf43e10058d4a" + integrity sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw== dependencies: - "@jest/core" "^27.5.1" - import-local "^3.0.2" - jest-cli "^27.5.1" + "@types/node" "*" + jest-util "^29.7.0" + merge-stream "^2.0.0" + supports-color "^8.0.0" -js-levenshtein@^1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" - integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g== +jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest/-/jest-29.7.0.tgz#994676fc24177f088f1c5e3737f5697204ff2613" + integrity sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw== + dependencies: + "@jest/core" "^29.7.0" + "@jest/types" "^29.6.3" + import-local "^3.0.2" + jest-cli "^29.7.0" js-sha3@0.8.0: version "0.8.0" @@ 
-7980,38 +8080,37 @@ jsdoc@^4.0.0: strip-json-comments "^3.1.0" underscore "~1.13.2" -jsdom@^16.6.0: - version "16.7.0" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-16.7.0.tgz#918ae71965424b197c819f8183a754e18977b710" - integrity sha512-u9Smc2G1USStM+s/x1ru5Sxrl6mPYCbByG1U/hUmqaVsm4tbNyS7CicOSRyuGQYZhTu0h84qkZZQ/I+dzizSVw== +jsdom@^20.0.0: + version "20.0.3" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-20.0.3.tgz#886a41ba1d4726f67a8858028c99489fed6ad4db" + integrity sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ== dependencies: - abab "^2.0.5" - acorn "^8.2.4" - acorn-globals "^6.0.0" - cssom "^0.4.4" + abab "^2.0.6" + acorn "^8.8.1" + acorn-globals "^7.0.0" + cssom "^0.5.0" cssstyle "^2.3.0" - data-urls "^2.0.0" - decimal.js "^10.2.1" - domexception "^2.0.1" + data-urls "^3.0.2" + decimal.js "^10.4.2" + domexception "^4.0.0" escodegen "^2.0.0" - form-data "^3.0.0" - html-encoding-sniffer "^2.0.1" - http-proxy-agent "^4.0.1" - https-proxy-agent "^5.0.0" + form-data "^4.0.0" + html-encoding-sniffer "^3.0.0" + http-proxy-agent "^5.0.0" + https-proxy-agent "^5.0.1" is-potential-custom-element-name "^1.0.1" - nwsapi "^2.2.0" - parse5 "6.0.1" - saxes "^5.0.1" + nwsapi "^2.2.2" + parse5 "^7.1.1" + saxes "^6.0.0" symbol-tree "^3.2.4" - tough-cookie "^4.0.0" - w3c-hr-time "^1.0.2" - w3c-xmlserializer "^2.0.0" - webidl-conversions "^6.1.0" - whatwg-encoding "^1.0.5" - whatwg-mimetype "^2.3.0" - whatwg-url "^8.5.0" - ws "^7.4.6" - xml-name-validator "^3.0.0" + tough-cookie "^4.1.2" + w3c-xmlserializer "^4.0.0" + webidl-conversions "^7.0.0" + whatwg-encoding "^2.0.0" + whatwg-mimetype "^3.0.0" + whatwg-url "^11.0.0" + ws "^8.11.0" + xml-name-validator "^4.0.0" jsesc@^3.0.2, jsesc@~3.0.2: version "3.0.2" @@ -8253,23 +8352,15 @@ lodash.uniq@^4.5.0: resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" integrity 
sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== -lodash@^4.17.15, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.7.0: +lodash@^4.17.15, lodash@^4.17.20, lodash@^4.17.21: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== -log-symbols@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" - integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== - dependencies: - chalk "^4.1.0" - is-unicode-supported "^0.1.0" - -long@^5.0.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/long/-/long-5.2.0.tgz#2696dadf4b4da2ce3f6f6b89186085d94d52fd61" - integrity sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w== +long@^5.0.0, long@^5.2.3: + version "5.2.3" + resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" + integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" @@ -8553,31 +8644,29 @@ ms@2.1.3, ms@^2.1.1, ms@^2.1.3: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -msw@^0.36.8: - version "0.36.8" - resolved "https://registry.yarnpkg.com/msw/-/msw-0.36.8.tgz#33ff8bfb0299626a95f43d0e4c3dc2c73c17f1ba" - integrity sha512-K7lOQoYqhGhTSChsmHMQbf/SDCsxh/m0uhN6Ipt206lGoe81fpTmaGD0KLh4jUxCONMOUnwCSj0jtX2CM4pEdw== - dependencies: - "@mswjs/cookies" "^0.1.7" - "@mswjs/interceptors" "^0.12.7" - "@open-draft/until" "^1.0.3" - "@types/cookie" 
"^0.4.1" - "@types/inquirer" "^8.1.3" - "@types/js-levenshtein" "^1.1.0" - chalk "4.1.1" - chokidar "^3.4.2" - cookie "^0.4.1" - graphql "^15.5.1" - headers-utils "^3.0.2" - inquirer "^8.2.0" - is-node-process "^1.0.1" - js-levenshtein "^1.1.6" - node-fetch "^2.6.7" - path-to-regexp "^6.2.0" - statuses "^2.0.0" - strict-event-emitter "^0.2.0" - type-fest "^1.2.2" - yargs "^17.3.0" +msw@^2.7.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/msw/-/msw-2.7.0.tgz#d13ff87f7e018fc4c359800ff72ba5017033fb56" + integrity sha512-BIodwZ19RWfCbYTxWTUfTXc+sg4OwjCAgxU1ZsgmggX/7S3LdUifsbUPJs61j0rWb19CZRGY5if77duhc0uXzw== + dependencies: + "@bundled-es-modules/cookie" "^2.0.1" + "@bundled-es-modules/statuses" "^1.0.1" + "@bundled-es-modules/tough-cookie" "^0.1.6" + "@inquirer/confirm" "^5.0.0" + "@mswjs/interceptors" "^0.37.0" + "@open-draft/deferred-promise" "^2.2.0" + "@open-draft/until" "^2.1.0" + "@types/cookie" "^0.6.0" + "@types/statuses" "^2.0.4" + graphql "^16.8.1" + headers-polyfill "^4.0.2" + is-node-process "^1.2.0" + outvariant "^1.4.3" + path-to-regexp "^6.3.0" + picocolors "^1.1.1" + strict-event-emitter "^0.5.1" + type-fest "^4.26.1" + yargs "^17.7.2" multicast-dns@^7.2.5: version "7.2.5" @@ -8587,10 +8676,10 @@ multicast-dns@^7.2.5: dns-packet "^5.2.2" thunky "^1.0.2" -mute-stream@0.0.8: - version "0.0.8" - resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" - integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== +mute-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-2.0.0.tgz#a5446fc0c512b71c83c44d908d5c7b7b4c493b2b" + integrity sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA== nano-time@1.0.0: version "1.0.0" @@ -8639,13 +8728,6 @@ node-emoji@^1.10.0: dependencies: lodash "^4.17.21" -node-fetch@^2.6.7: - version "2.7.0" - resolved 
"https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" - integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== - dependencies: - whatwg-url "^5.0.0" - node-forge@^1: version "1.3.1" resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.1.tgz#be8da2af243b2417d5f646a770663a92b7e9ded3" @@ -8695,7 +8777,7 @@ numeral@^2.0.6: resolved "https://registry.yarnpkg.com/numeral/-/numeral-2.0.6.tgz#4ad080936d443c2561aed9f2197efffe25f4e506" integrity sha1-StCAk21EPCVhrtnyGX7//iX05QY= -nwsapi@^2.2.0: +nwsapi@2.2.13, nwsapi@^2.2.2: version "2.2.13" resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.13.tgz#e56b4e98960e7a040e5474536587e599c4ff4655" integrity sha512-cTGB9ptp9dY9A5VbMSe7fQBcl/tt22Vcqdq8+eN93rblOuE0aCFu4aZ2vMwct/2t+lFnosm8RkQW1I0Omb1UtQ== @@ -8799,7 +8881,7 @@ once@^1.3.0: dependencies: wrappy "1" -onetime@^5.1.0, onetime@^5.1.2: +onetime@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== @@ -8839,27 +8921,7 @@ optionator@^0.9.3: type-check "^0.4.0" word-wrap "^1.2.5" -ora@^5.4.1: - version "5.4.1" - resolved "https://registry.yarnpkg.com/ora/-/ora-5.4.1.tgz#1b2678426af4ac4a509008e5e4ac9e9959db9e18" - integrity sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ== - dependencies: - bl "^4.1.0" - chalk "^4.1.0" - cli-cursor "^3.1.0" - cli-spinners "^2.5.0" - is-interactive "^1.0.0" - is-unicode-supported "^0.1.0" - log-symbols "^4.1.0" - strip-ansi "^6.0.0" - wcwidth "^1.0.1" - -os-tmpdir@~1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" - integrity 
sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== - -outvariant@^1.2.0: +outvariant@^1.4.0, outvariant@^1.4.3: version "1.4.3" resolved "https://registry.yarnpkg.com/outvariant/-/outvariant-1.4.3.tgz#221c1bfc093e8fec7075497e7799fdbf43d14873" integrity sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA== @@ -8871,7 +8933,7 @@ p-limit@^2.0.0, p-limit@^2.2.0: dependencies: p-try "^2.0.0" -p-limit@^3.0.2: +p-limit@^3.0.2, p-limit@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== @@ -8949,11 +9011,18 @@ parse-json@^5.0.0, parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse5@6.0.1, parse5@^6.0.0: +parse5@^6.0.0: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== +parse5@^7.0.0, parse5@^7.1.1: + version "7.2.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.2.1.tgz#8928f55915e6125f430cc44309765bf17556a33a" + integrity sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ== + dependencies: + entities "^4.5.0" + parseurl@~1.3.2, parseurl@~1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" @@ -8997,7 +9066,7 @@ path-to-regexp@0.1.10: resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b" integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w== -path-to-regexp@^6.2.0: +path-to-regexp@^6.3.0: version "6.3.0" resolved 
"https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.3.0.tgz#2b6a26a337737a8e1416f9272ed0766b1c0389f4" integrity sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ== @@ -9012,7 +9081,7 @@ performance-now@^2.1.0: resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== -picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0: +picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -9600,7 +9669,7 @@ pretty-format@^27.0.0: ansi-styles "^5.0.0" react-is "^17.0.1" -pretty-format@^27.0.2, pretty-format@^27.4.6, pretty-format@^27.5.1: +pretty-format@^27.0.2, pretty-format@^27.4.6: version "27.5.1" resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-27.5.1.tgz#2181879fdea51a7a5851fb39d920faa63f01d88e" integrity sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ== @@ -9609,13 +9678,12 @@ pretty-format@^27.0.2, pretty-format@^27.4.6, pretty-format@^27.5.1: ansi-styles "^5.0.0" react-is "^17.0.1" -pretty-format@^28.1.3: - version "28.1.3" - resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-28.1.3.tgz#c9fba8cedf99ce50963a11b27d982a9ae90970d5" - integrity sha512-8gFb/To0OmxHR9+ZTb14Df2vNxdGCX8g1xWGUTqUw5TiZvcQf5sHKObd5UcPyLLyowNwDAMTF3XWOG1B6mxl1Q== +pretty-format@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812" + integrity sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ== 
dependencies: - "@jest/schemas" "^28.1.3" - ansi-regex "^5.0.1" + "@jest/schemas" "^29.6.3" ansi-styles "^5.0.0" react-is "^18.0.0" @@ -9708,20 +9776,27 @@ proxy-addr@~2.0.7: ipaddr.js "1.9.1" psl@^1.1.33: - version "1.9.0" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" - integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== + version "1.15.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.15.0.tgz#bdace31896f1d97cec6a79e8224898ce93d974c6" + integrity sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w== + dependencies: + punycode "^2.3.1" punycode.js@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/punycode.js/-/punycode.js-2.3.1.tgz#6b53e56ad75588234e79f4affa90972c7dd8cdb7" integrity sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA== -punycode@^2.1.0, punycode@^2.1.1: +punycode@^2.1.0, punycode@^2.1.1, punycode@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== +pure-rand@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" + integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== + qs@6.13.0: version "6.13.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" @@ -9962,20 +10037,20 @@ react-remove-scroll@^2.6.0: use-callback-ref "^1.3.0" use-sidecar "^1.1.2" -react-router-dom@<6.4.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.3.0.tgz#a0216da813454e521905b5fa55e0e5176123f43d" - integrity 
sha512-uaJj7LKytRxZNQV8+RbzJWnJ8K2nPsOOEuX7aQstlMZKQT0164C+X2w6bnkqU3sjtLvpd5ojrezAyfZ1+0sStw== +react-router-dom@^6.28.0: + version "6.28.0" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.28.0.tgz#f73ebb3490e59ac9f299377062ad1d10a9f579e6" + integrity sha512-kQ7Unsl5YdyOltsPGl31zOjLrDv+m2VcIEcIHqYYD3Lp0UppLjrzcfJqDJwXxFw3TH/yvapbnUvPlAj7Kx5nbg== dependencies: - history "^5.2.0" - react-router "6.3.0" + "@remix-run/router" "1.21.0" + react-router "6.28.0" -react-router@6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.3.0.tgz#3970cc64b4cb4eae0c1ea5203a80334fdd175557" - integrity sha512-7Wh1DzVQ+tlFjkeo+ujvjSqSJmkt1+8JO+T5xklPlgrh70y7ogx75ODRW0ThWhY7S+6yEDks8TYrtQe/aoboBQ== +react-router@6.28.0: + version "6.28.0" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.28.0.tgz#29247c86d7ba901d7e5a13aa79a96723c3e59d0d" + integrity sha512-HrYdIFqdrnhDw0PqG/AKjAqEqM7AvxCz0DQ4h2W8k6nqmc5uRBYDag0SBxx9iYz5G8gnuNVLzUe13wl9eAsXXg== dependencies: - history "^5.2.0" + "@remix-run/router" "1.21.0" react-style-singleton@^2.2.1: version "2.2.1" @@ -10031,7 +10106,7 @@ readable-stream@^2.0.1: string_decoder "~1.1.1" util-deprecate "~1.0.1" -readable-stream@^3.0.6, readable-stream@^3.4.0: +readable-stream@^3.0.6: version "3.6.2" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== @@ -10301,10 +10376,10 @@ resolve-url-loader@^5.0.0: postcss "^8.2.14" source-map "0.6.1" -resolve.exports@^1.1.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-1.1.1.tgz#05cfd5b3edf641571fd46fa608b610dda9ead999" - integrity sha512-/NtpHNDN7jWhAaQ9BvBUYZ6YTXsRBgfqWFWP7BZBaoMJO/I3G5OFzvTuWNlZC3aPjins1F+TNrLKsGbH4rfsRQ== +resolve.exports@^2.0.0: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.3.tgz#41955e6f1b4013b7586f873749a635dea07ebe3f" + integrity sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A== resolve@^1.14.2, resolve@^1.19.0, resolve@^1.20.0, resolve@^1.22.4: version "1.22.8" @@ -10333,14 +10408,6 @@ resolve@^2.0.0-next.5: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -restore-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" - integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== - dependencies: - onetime "^5.1.0" - signal-exit "^3.0.2" - retry@^0.13.1: version "0.13.1" resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658" @@ -10351,7 +10418,7 @@ reusify@^1.0.4: resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== -rimraf@3.0.2, rimraf@^3.0.0, rimraf@^3.0.2: +rimraf@3.0.2, rimraf@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== @@ -10415,11 +10482,6 @@ rollup@^2.43.1, rollup@^2.68.0: optionalDependencies: fsevents "~2.3.2" -run-async@^2.4.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455" - integrity sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ== - run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" @@ -10427,13 +10489,6 @@ 
run-parallel@^1.1.9: dependencies: queue-microtask "^1.2.2" -rxjs@^7.2.0, rxjs@^7.5.5: - version "7.8.1" - resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543" - integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg== - dependencies: - tslib "^2.1.0" - safe-array-concat@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb" @@ -10481,10 +10536,10 @@ sass-loader@^12.3.0: klona "^2.0.4" neo-async "^2.6.2" -saxes@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/saxes/-/saxes-5.0.1.tgz#eebab953fa3b7608dbe94e5dadb15c888fa6696d" - integrity sha512-5LBh1Tls8c9xgGjw3QrMwETmTMVk0oFgvrFSvWx62llR2hcEInrKNZ2GZCCuuy2lvWrdl5jhbpeqc5hRYKFOcw== +saxes@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/saxes/-/saxes-6.0.0.tgz#fe5b4a4768df4f14a201b1ba6a65c1f3d9988cc5" + integrity sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA== dependencies: xmlchars "^2.2.0" @@ -10588,10 +10643,10 @@ serialize-javascript@^6.0.0, serialize-javascript@^6.0.1: dependencies: randombytes "^2.1.0" -serialize-query-params@^1.3.5: - version "1.3.6" - resolved "https://registry.yarnpkg.com/serialize-query-params/-/serialize-query-params-1.3.6.tgz#5dd5225db85ce747fe6fbc4897628504faafec6d" - integrity sha512-VlH7sfWNyPVZClPkRacopn6sn5uQMXBsjPVz1+pBHX895VpcYVznfJtZ49e6jymcrz+l/vowkepCZn/7xEAEdw== +serialize-query-params@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/serialize-query-params/-/serialize-query-params-2.0.2.tgz#598a3fb9e13f4ea1c1992fbd20231aa16b31db81" + integrity sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q== serve-index@^1.9.1: version "1.9.1" @@ -10616,11 +10671,6 @@ serve-static@1.16.2: parseurl "~1.3.3" send "0.19.0" -set-cookie-parser@^2.4.6: - 
version "2.7.0" - resolved "https://registry.yarnpkg.com/set-cookie-parser/-/set-cookie-parser-2.7.0.tgz#ef5552b56dc01baae102acb5fc9fb8cd060c30f9" - integrity sha512-lXLOiqpkUumhRdFF3k1osNXCy9akgx/dyPZ5p8qAg9seJzXr5ZrlqZuWIMuY6ejOsVLE6flJ5/h3lsn57fQ/PQ== - set-function-length@^1.2.1: version "1.2.2" resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" @@ -10685,11 +10735,16 @@ side-channel@^1.0.4, side-channel@^1.0.6: get-intrinsic "^1.2.4" object-inspect "^1.13.1" -signal-exit@^3.0.2, signal-exit@^3.0.3: +signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== +signal-exit@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" + integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== + sisteransi@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" @@ -10700,10 +10755,10 @@ slash@^3.0.0: resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== -slash@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-4.0.0.tgz#2422372176c4c6c5addb5e2ada885af984b396a7" - integrity sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew== +slash@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-5.1.0.tgz#be3adddcdf09ac38eebe8dcdc7b1a57a75b095ce" + integrity 
sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg== snake-case@^3.0.4: version "3.0.4" @@ -10741,7 +10796,15 @@ source-map-loader@^3.0.0: iconv-lite "^0.6.3" source-map-js "^1.0.1" -source-map-support@^0.5.6, source-map-support@~0.5.20: +source-map-support@0.5.13: + version "0.5.13" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932" + integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map-support@~0.5.20: version "0.5.21" resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== @@ -10843,7 +10906,7 @@ static-eval@2.0.2: dependencies: escodegen "^1.8.1" -statuses@2.0.1, statuses@^2.0.0: +statuses@2.0.1, statuses@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== @@ -10860,12 +10923,10 @@ stop-iteration-iterator@^1.0.0: dependencies: internal-slot "^1.0.4" -strict-event-emitter@^0.2.0: - version "0.2.8" - resolved "https://registry.yarnpkg.com/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz#b4e768927c67273c14c13d20e19d5e6c934b47ca" - integrity sha512-KDf/ujU8Zud3YaLtMCcTI4xkZlZVIYxTLr+XIULexP+77EEVWixeXroLUXQXiVtH4XH2W7jr/3PT1v3zBuvc3A== - dependencies: - events "^3.3.0" +strict-event-emitter@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz#1602ece81c51574ca39c6815e09f1a3e8550bd93" + integrity 
sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ== strict-uri-encode@^2.0.0: version "2.0.0" @@ -11109,14 +11170,6 @@ supports-color@^8.0.0: dependencies: has-flag "^4.0.0" -supports-hyperlinks@^2.0.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" - integrity sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA== - dependencies: - has-flag "^4.0.0" - supports-color "^7.0.0" - supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" @@ -11188,14 +11241,6 @@ tempy@^0.6.0: type-fest "^0.16.0" unique-string "^2.0.0" -terminal-link@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/terminal-link/-/terminal-link-2.1.1.tgz#14a64a27ab3c0df933ea546fba55f2d078edc994" - integrity sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ== - dependencies: - ansi-escapes "^4.2.1" - supports-hyperlinks "^2.0.0" - terser-webpack-plugin@^5.2.5, terser-webpack-plugin@^5.3.10: version "5.3.10" resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz#904f4c9193c6fd2a03f693a2150c62a92f40d199" @@ -11236,16 +11281,6 @@ text-table@^0.2.0: resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== -throat@^6.0.1: - version "6.0.2" - resolved "https://registry.yarnpkg.com/throat/-/throat-6.0.2.tgz#51a3fbb5e11ae72e2cf74861ed5c8020f89f29fe" - integrity sha512-WKexMoJj3vEuK0yFEapj8y64V0A6xcuPuK9Gt1d0R+dzCSJc0lHqQytAbSB4cDAK0dWh4T0E2ETkoLE2WZ41OQ== - -through@^2.3.6: - version "2.3.8" - resolved 
"https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== - thunky@^1.0.2: version "1.1.0" resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d" @@ -11256,13 +11291,6 @@ tiny-invariant@^1.0.6: resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.2.0.tgz#a1141f86b672a9148c72e978a19a73b9b94a15a9" integrity sha512-1Uhn/aqw5C6RI4KejVeTg6mIS7IqxnLJ8Mv2tV5rTc0qWobay7pDUz6Wi392Cnc8ak1H0F2cjoRzb2/AW4+Fvg== -tmp@^0.0.33: - version "0.0.33" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" - integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== - dependencies: - os-tmpdir "~1.0.2" - tmp@^0.2.1: version "0.2.3" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae" @@ -11290,7 +11318,7 @@ toidentifier@1.0.1: resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== -tough-cookie@^4.0.0: +tough-cookie@^4.1.2, tough-cookie@^4.1.4: version "4.1.4" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.4.tgz#945f1461b45b5a8c76821c33ea49c3ac192c1b36" integrity sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag== @@ -11307,18 +11335,13 @@ tr46@^1.0.1: dependencies: punycode "^2.1.0" -tr46@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-2.1.0.tgz#fa87aa81ca5d5941da8cbf1f9b749dc969a4e240" - integrity sha512-15Ih7phfcdP5YxqiB+iDtLoaTz4Nd35+IiAv0kQ5FNKHzXgdWqPoTIqEDDJmXceQt4JZk6lVPT8lnDlPpGDppw== +tr46@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/tr46/-/tr46-3.0.0.tgz#555c4e297a950617e8eeddef633c87d4d9d6cbf9" + integrity sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA== dependencies: punycode "^2.1.1" -tr46@~0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" - integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== - trim-trailing-lines@^1.0.0: version "1.1.4" resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0" @@ -11410,10 +11433,10 @@ type-fest@^0.21.3: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== -type-fest@^1.2.2: - version "1.4.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-1.4.0.tgz#e9fb813fe3bf1744ec359d55d1affefa76f14be1" - integrity sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA== +type-fest@^4.26.1: + version "4.33.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.33.0.tgz#2da0c135b9afa76cf8b18ecfd4f260ecd414a432" + integrity sha512-s6zVrxuyKbbAsSAD5ZPTB77q4YIdRctkTbJ2/Dqlinwz+8ooH2gd+YA7VA6Pa93KML9GockVvoxjZ2vHP+mu8g== type-is@~1.6.18: version "1.6.18" @@ -11474,10 +11497,10 @@ typedarray-to-buffer@^3.1.5: dependencies: is-typedarray "^1.0.0" -typescript@^4.9.5: - version "4.9.5" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a" - integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== +typescript@~5.7.2: + version "5.7.2" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.7.2.tgz#3169cf8c4c8a828cde53ba9ecb3d2b1d5dd67be6" + 
integrity sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg== uc.micro@^2.0.0, uc.micro@^2.1.0: version "2.1.0" @@ -11514,6 +11537,11 @@ undici-types@~6.19.2: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== +undici-types@~6.20.0: + version "6.20.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.20.0.tgz#8171bf22c1f588d1554d55bf204bc624af388433" + integrity sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg== + unherit@^1.0.4: version "1.1.3" resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.3.tgz#6c9b503f2b41b262330c80e91c8614abdaa69c22" @@ -11709,12 +11737,12 @@ use-memo-one@^1.1.3: resolved "https://registry.yarnpkg.com/use-memo-one/-/use-memo-one-1.1.3.tgz#2fd2e43a2169eabc7496960ace8c79efef975e99" integrity sha512-g66/K7ZQGYrI6dy8GLpVcMsBp4s17xNkYJVSMvTEevGy3nDxHOfE6z8BVE22+5G5x7t3+bhzrlTDB7ObrEE0cQ== -use-query-params@^1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/use-query-params/-/use-query-params-1.2.3.tgz#306c31a0cbc714e8a3b4bd7e91a6a9aaccaa5e22" - integrity sha512-cdG0tgbzK+FzsV6DAt2CN8Saa3WpRnze7uC4Rdh7l15epSFq7egmcB/zuREvPNwO5Yk80nUpDZpiyHsoq50d8w== +use-query-params@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/use-query-params/-/use-query-params-2.2.1.tgz#c558ab70706f319112fbccabf6867b9f904e947d" + integrity sha512-i6alcyLB8w9i3ZK3caNftdb+UnbfBRNPDnc89CNQWkGRmDrm/gfydHvMBfVsQJRq3NoHOM2dt/ceBWG2397v1Q== dependencies: - serialize-query-params "^1.3.5" + serialize-query-params "^2.0.2" use-sidecar@^1.1.2: version "1.1.2" @@ -11749,14 +11777,14 @@ uuid@^8.3.0, uuid@^8.3.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity 
sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== -v8-to-istanbul@^8.1.0: - version "8.1.1" - resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-8.1.1.tgz#77b752fd3975e31bbcef938f85e9bd1c7a8d60ed" - integrity sha512-FGtKtv3xIpR6BYhvgH8MI/y78oT7d8Au3ww4QIxymrCtZEh5b8gCw2siywE+puhEmuWKDtmfrvF5UlB298ut3w== +v8-to-istanbul@^9.0.1: + version "9.3.0" + resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175" + integrity sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA== dependencies: + "@jridgewell/trace-mapping" "^0.3.12" "@types/istanbul-lib-coverage" "^2.0.1" - convert-source-map "^1.6.0" - source-map "^0.7.3" + convert-source-map "^2.0.0" vary@~1.1.2: version "1.1.2" @@ -11786,21 +11814,14 @@ vfile@^4.0.0, vfile@^4.2.1: unist-util-stringify-position "^2.0.0" vfile-message "^2.0.0" -w3c-hr-time@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd" - integrity sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ== - dependencies: - browser-process-hrtime "^1.0.0" - -w3c-xmlserializer@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz#3e7104a05b75146cc60f564380b7f683acf1020a" - integrity sha512-4tzD0mF8iSiMiNs30BiLO3EpfGLZUT2MSX/G+o7ZywDzliWQ3OPtTZ0PTC3B3ca1UAf4cJMHB+2Bf56EriJuRA== +w3c-xmlserializer@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz#aebdc84920d806222936e3cdce408e32488a3073" + integrity sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw== dependencies: - xml-name-validator "^3.0.0" + xml-name-validator "^4.0.0" -walker@^1.0.7: +walker@^1.0.7, walker@^1.0.8: version "1.0.8" resolved 
"https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" integrity sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ== @@ -11822,37 +11843,20 @@ wbuf@^1.1.0, wbuf@^1.7.3: dependencies: minimalistic-assert "^1.0.0" -wcwidth@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" - integrity sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg== - dependencies: - defaults "^1.0.3" - web-namespaces@^1.0.0: version "1.1.4" resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec" integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw== -webidl-conversions@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" - integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== - webidl-conversions@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" integrity sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg== -webidl-conversions@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-5.0.0.tgz#ae59c8a00b121543a2acc65c0434f57b0fc11aff" - integrity sha512-VlZwKPCkYKxQgeSbH5EyngOmRp7Ww7I9rQLERETtf5ofd9pGeswWiOtogpEO850jziPRarreGxn5QIiTqpb2wA== - -webidl-conversions@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-6.1.0.tgz#9111b4d7ea80acd40f5270d666621afa78b69514" - integrity 
sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w== +webidl-conversions@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a" + integrity sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g== webpack-dev-middleware@^5.3.4: version "5.3.4" @@ -11973,30 +11977,30 @@ websocket-extensions@>=0.1.1: resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42" integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg== -whatwg-encoding@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0" - integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw== +whatwg-encoding@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz#e7635f597fd87020858626805a2729fa7698ac53" + integrity sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg== dependencies: - iconv-lite "0.4.24" + iconv-lite "0.6.3" whatwg-fetch@^3.6.2: version "3.6.20" resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz#580ce6d791facec91d37c72890995a0b48d31c70" integrity sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg== -whatwg-mimetype@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" - integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g== +whatwg-mimetype@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz#5fa1a7623867ff1af6ca3dc72ad6b8a4208beba7" + integrity sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q== -whatwg-url@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" - integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== +whatwg-url@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-11.0.0.tgz#0a849eebb5faf2119b901bb76fd795c2848d4018" + integrity sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ== dependencies: - tr46 "~0.0.3" - webidl-conversions "^3.0.0" + tr46 "^3.0.0" + webidl-conversions "^7.0.0" whatwg-url@^7.0.0: version "7.1.0" @@ -12007,15 +12011,6 @@ whatwg-url@^7.0.0: tr46 "^1.0.1" webidl-conversions "^4.0.2" -whatwg-url@^8.0.0, whatwg-url@^8.5.0: - version "8.7.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-8.7.0.tgz#656a78e510ff8f3937bc0bcbe9f5c0ac35941b77" - integrity sha512-gAojqb/m9Q8a5IV96E3fHJM70AzCkgt4uXYX2O7EmuyOnLrViCQlsEBmF9UQIu3/aeAIp2U17rtbpZWNntQqdg== - dependencies: - lodash "^4.7.0" - tr46 "^2.1.0" - webidl-conversions "^6.1.0" - which-boxed-primitive@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" @@ -12254,7 +12249,7 @@ workbox-window@6.6.1: "@types/trusted-types" "^2.0.2" workbox-core "6.6.1" -wrap-ansi@^6.0.1: +wrap-ansi@^6.2.0: version "6.2.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== @@ -12287,20 +12282,23 @@ write-file-atomic@^3.0.0: signal-exit "^3.0.2" typedarray-to-buffer 
"^3.1.5" -ws@^7.4.6: - version "7.5.10" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9" - integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ== +write-file-atomic@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd" + integrity sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg== + dependencies: + imurmurhash "^0.1.4" + signal-exit "^3.0.7" -ws@^8.13.0: +ws@^8.11.0, ws@^8.13.0: version "8.18.0" resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== -xml-name-validator@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" - integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw== +xml-name-validator@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-4.0.0.tgz#79a006e2e63149a8600f15430f0a4725d1524835" + integrity sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw== xmlchars@^2.2.0: version "2.2.0" @@ -12332,30 +12330,12 @@ yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== -yargs-parser@^20.2.2: - version "20.2.9" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" - integrity 
sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== - yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs@^16.2.0: - version "16.2.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" - integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== - dependencies: - cliui "^7.0.2" - escalade "^3.1.1" - get-caller-file "^2.0.5" - require-directory "^2.1.1" - string-width "^4.2.0" - y18n "^5.0.5" - yargs-parser "^20.2.2" - -yargs@^17.3.0: +yargs@^17.3.1, yargs@^17.7.2: version "17.7.2" resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== @@ -12373,6 +12353,11 @@ yocto-queue@^0.1.0: resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== +yoctocolors-cjs@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz#f4b905a840a37506813a7acaa28febe97767a242" + integrity sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA== + zod@^3.11.6: version "3.22.3" resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.3.tgz#2fbc96118b174290d94e8896371c95629e87a060"