Merged

Changes from 1 commit (26 commits in this pull request):
5fdb79b
feat: Adding Docling RAG demo
franciscojavierarceo Mar 1, 2025
9a2e8a6
updated demo
franciscojavierarceo Mar 2, 2025
d3af45d
cleaned up notebook
franciscojavierarceo Mar 2, 2025
caba9e9
adding chunk id
franciscojavierarceo Mar 2, 2025
2b5f622
adding quickstart demo that is WIP and updating docling-demo to expor…
franciscojavierarceo Mar 2, 2025
092184c
adding current tentative example repo
franciscojavierarceo Mar 2, 2025
c032c7d
adding current temporary work
franciscojavierarceo Mar 2, 2025
f234bcb
updating demo script to rename things
franciscojavierarceo Mar 3, 2025
37debb0
updated quickstart
franciscojavierarceo Mar 3, 2025
bcc4d10
added comment
franciscojavierarceo Mar 3, 2025
24e8cb8
checking in progress
franciscojavierarceo Mar 6, 2025
4fc74de
checking in progress for now, still have some issues with vector retr…
franciscojavierarceo Mar 7, 2025
afc2bb6
okay think i have most things working
franciscojavierarceo Mar 8, 2025
79cb8e1
removing commenting and unnecessary code
franciscojavierarceo Mar 9, 2025
267b740
uploading demo
franciscojavierarceo Mar 17, 2025
5ba452e
uploading other files
franciscojavierarceo Mar 23, 2025
1953f86
updated repo example
franciscojavierarceo Mar 27, 2025
a929cae
checking in current notebook, almost there
franciscojavierarceo Mar 27, 2025
61a1fdf
fixed linter
franciscojavierarceo Mar 27, 2025
62f2ed3
fixed transformation logic:
franciscojavierarceo Apr 1, 2025
e829840
removed print
franciscojavierarceo Apr 1, 2025
e12731a
added README with description
franciscojavierarceo Apr 2, 2025
18251ce
removing print
franciscojavierarceo Apr 2, 2025
20e5ab2
updating
franciscojavierarceo Apr 2, 2025
ccc4be3
updating metadata file
franciscojavierarceo Apr 2, 2025
bacee11
updated readme and adding dataset
franciscojavierarceo Apr 2, 2025
checking in progress
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
franciscojavierarceo committed Mar 23, 2025
commit 24e8cb82361ccaf10a9fc0047d1a802c82236fff
examples/rag-docling/feature_repo/example_repo.py (82 changes: 81 additions & 1 deletion)
@@ -6,7 +6,7 @@
FileSource,
)
from feast.data_format import ParquetFormat
- from feast.types import Float64, Array, String, ValueType
+ from feast.types import Float64, Array, String, ValueType, Bytes
from feast import Entity, RequestSource
from feast.on_demand_feature_view import on_demand_feature_view
from sentence_transformers import SentenceTransformer
@@ -74,3 +74,83 @@ def embed_chunk(inputs: Dict[str, Any]) -> Dict[str, List[float]]:
source=source,
ttl=timedelta(hours=2),
)

import hashlib
import io

from docling.chunking import HybridChunker
from docling.datamodel.base_models import DocumentStream
from docling.document_converter import DocumentConverter
from transformers import AutoTokenizer


def generate_chunk_id(file_name: str, raw_chunk_markdown: str = "") -> str:
    """Generate a unique chunk ID based on file_name and raw_chunk_markdown."""
    unique_string = f"{file_name}-{raw_chunk_markdown}" if raw_chunk_markdown else file_name
    return hashlib.sha256(unique_string.encode()).hexdigest()
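
# Note: the ID is a deterministic SHA-256 hex digest, so the same file name and
# chunk text always map to the same ID, and the document-level ID (no chunk
# text) differs per file. A quick sanity check, with hypothetical file names:
#   assert generate_chunk_id("report.pdf", "# Intro") == generate_chunk_id("report.pdf", "# Intro")
#   assert generate_chunk_id("report.pdf") != generate_chunk_id("other.pdf")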

# Load tokenizer and embedding model
EMBED_MODEL_ID = "sentence-transformers/all-MiniLM-L6-v2"
MAX_TOKENS = 64  # Small token limit for demonstration

tokenizer = AutoTokenizer.from_pretrained(EMBED_MODEL_ID)
embedding_model = SentenceTransformer(EMBED_MODEL_ID)
chunker = HybridChunker(tokenizer=tokenizer, max_tokens=MAX_TOKENS, merge_peers=True)

input_request_pdf = RequestSource(
    name="pdf_request_source",
    schema=[
        Field(name="pdf_bytes", dtype=Bytes),
        Field(name="file_name", dtype=String),
    ],
)


@on_demand_feature_view(
    sources=[input_request_pdf],
    schema=[
        Field(name="document_id", dtype=String, vector_index=False),
        Field(name="chunk_id", dtype=Array(String), vector_index=False),
        Field(name="chunk_text", dtype=Array(String), vector_index=False),
        Field(name="vector", dtype=Array(Float64), vector_index=False),
    ],
    mode="python",
    singleton=False,
    write_to_online_store=False,
    explode=True,
)
def docling_transform_docs(inputs: Dict[str, Any]) -> Dict[str, Any]:
    """Convert raw PDF bytes into chunk IDs, chunk text, and embeddings with Docling."""
    try:
        buf = io.BytesIO(inputs["pdf_bytes"])
        source = DocumentStream(name=inputs["file_name"], stream=buf)
        converter = DocumentConverter()
        result = converter.convert(source)
        document_id = generate_chunk_id(inputs["file_name"])
        chunks, chunk_ids, embeddings = [], [], []
        for chunk in chunker.chunk(dl_doc=result.document):
            raw_chunk = chunker.serialize(chunk=chunk)
            # Embed the serialized chunk text with the sentence-transformers model.
            embedding = embedding_model.encode(raw_chunk).tolist()
            chunk_id = generate_chunk_id(inputs["file_name"], raw_chunk)
            chunks.append(raw_chunk)
            chunk_ids.append(chunk_id)
            embeddings.append(embedding)
        # Return list-valued fields so explode=True fans them out into one row per chunk.
        return {
            "document_id": document_id,
            "chunk_id": chunk_ids,
            "chunk_text": chunks,
            "vector": embeddings,
        }
    except Exception as e:
        print(e)
        # Placeholder row so retrieval still gets a well-formed response on failure.
        return {
            "document_id": "missing",
            "chunk_id": ["missing"],
            "chunk_text": ["missing"],
            "vector": [[0.1, 0.2]],
        }
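
For context, a minimal sketch of how this request-source view might be exercised at retrieval time; the repo path and PDF file name below are hypothetical, and request data for the RequestSource is passed through entity_rows:

    from feast import FeatureStore

    store = FeatureStore(repo_path=".")  # hypothetical repo path
    with open("sample.pdf", "rb") as f:  # hypothetical PDF
        pdf_bytes = f.read()

    response = store.get_online_features(
        features=[
            "docling_transform_docs:document_id",
            "docling_transform_docs:chunk_id",
            "docling_transform_docs:chunk_text",
            "docling_transform_docs:vector",
        ],
        entity_rows=[{"pdf_bytes": pdf_bytes, "file_name": "sample.pdf"}],
    ).to_dict()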


@@ -1001,7 +1001,7 @@ def test_sqlite_get_online_documents_v2_search() -> None:
assert result["distance"] == [-1.8458267450332642, -1.8458267450332642]


- @pytest.mark.skip(reason="Skipping this test as CI struggles with it")
+ # @pytest.mark.skip(reason="Skipping this test as CI struggles with it")
def test_local_milvus() -> None:
import random

@@ -20,6 +20,7 @@
RequestSource,
)
from feast.driver_test_data import create_driver_hourly_stats_df
+ from feast.nlp_test_data import create_document_chunks_df
from feast.feature_view import DUMMY_ENTITY_FIELD
from feast.field import Field
from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig
@@ -843,6 +844,7 @@ def test_stored_writes(self):
)

driver = Entity(name="driver", join_keys=["driver_id"])
+ word = Entity(name="word", join_keys=["word_id"])

driver_stats_source = FileSource(
name="driver_hourly_stats_source",