MRP999 committed on
Commit 5e6b02b · verified · 1 Parent(s): 00a60c2

Upload 5 files

Files changed (5)
  1. Dockerfile +13 -0
  2. app.py +140 -0
  3. pinecone_embeddings.py +153 -0
  4. requirements.txt +19 -0
  5. semantic_aware.py +202 -0
Dockerfile ADDED
@@ -0,0 +1,13 @@
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ import re
+ import hashlib
+ import httpx
+ from fastapi import FastAPI, Request, HTTPException, status
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel
+ from typing import List
+ from google import genai
+ from pinecone import Pinecone
+ from semantic_aware import load_document
+ from pinecone_embeddings import PineconeVectorStore
+
+
+ # Configuration
+ EMBEDDING_MODEL = "BAAI/bge-base-en-v1.5"
+ PINECONE_INDEX = 'policy-documents'
+ CACHE_DIR = "./document_cache"
+ os.makedirs(CACHE_DIR, exist_ok=True)
+ pinecone = Pinecone(
+     api_key=os.getenv("PINECONE_API_KEY"),
+     environment=os.getenv("PINECONE_ENV")
+ )
+
+ # Legacy google-generativeai initialization, kept for reference; the
+ # google-genai client is now created inside the request handler.
+ # genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+ # model = genai.GenerativeModel('gemini-1.5-flash')
+
+ app = FastAPI()
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ class QueryRequest(BaseModel):
+     documents: str
+     questions: List[str]
+
+ class QueryResponse(BaseModel):
+     answers: List[str]
+
+ def document_cache_key(url: str) -> str:
+     return hashlib.md5(url.encode()).hexdigest()
+
+ async def fetch_with_cache(url: str) -> str:
+     """Download a document once and cache it on disk, keyed by URL hash."""
+     cache_key = document_cache_key(url)
+     cache_path = os.path.join(CACHE_DIR, f"{cache_key}.pdf")
+
+     if os.path.exists(cache_path):
+         return cache_path
+
+     async with httpx.AsyncClient() as client:
+         response = await client.get(url)
+         response.raise_for_status()
+         with open(cache_path, "wb") as f:
+             f.write(response.content)
+
+     return cache_path
+
+ def build_gemini_prompt(question: str, clauses: List[dict]) -> str:
+     """Strictly formatted prompt for Gemini."""
+     context = "\n\n".join(
+         f"CLAUSE {c.get('header', '')} (Page {c.get('page', 'N/A')}):\n{c['text']}"
+         for c in clauses
+     )
+
+     return f"""You are a strict, accurate assistant that answers insurance or policy-related questions using only the provided clauses.
+
+ A user has asked the following question:
+ "{question}"
+
+ Answer only from the text below, without guessing or skipping information.
+ If an answer is partially stated or implied, respond accordingly with a brief clarification.
+ If the information is not present at all, reply exactly: "Not mentioned in the provided clauses."
+
+ Clauses:
+ {context}
+
+ Respond with 1 to 3 sentences at most.
+ Do not add explanations, formatting, bullet points, summaries, or any output other than the answer.
+ """
+
+ # def extract_first_sentence(text: str) -> str:
+ #     """Ensure single-sentence output"""
+ #     sentences = re.split(r'(?<=[.!?])\s+', text.strip())
+ #     return sentences[0] if sentences else text
+
+ @app.post("/query", response_model=QueryResponse)
+ async def answer_questions(request: Request, body: QueryRequest):
+     # Authentication: a single bearer token shared via the API_KEY env var
+     if request.headers.get("Authorization") != f"Bearer {os.environ['API_KEY']}":
+         raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
+
+     try:
+         # 1. Download (or reuse from cache) and chunk the document
+         local_path = await fetch_with_cache(body.documents)
+         doc = load_document(local_path)
+
+         # 2. Rebuild the vector index from this document's chunks; the cached
+         # filename keys the chunk IDs so different documents get distinct IDs
+         vector_store = PineconeVectorStore(index_name=PINECONE_INDEX, pinecone=pinecone)
+         vector_store.overwrite_vectors(doc["chunks"], os.path.basename(local_path), pinecone)
+
+         # 3. Answer each question from the retrieved clauses
+         answers = []
+         client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
+
+         for question in body.questions:
+             # Retrieve the most relevant clauses for this question
+             clauses = vector_store.retrieve_chunks(question, pinecone, top_k=5)
+
+             # Generate an answer with Gemini, constrained to those clauses
+             prompt = build_gemini_prompt(question, clauses)
+             response = client.models.generate_content(
+                 model="gemini-2.5-flash",
+                 contents=prompt
+             )
+
+             # Strict formatting (single-sentence trim, currently disabled)
+             # answer = extract_first_sentence(response.text)
+             answers.append(response.text)
+
+         return {"answers": answers}
+
+     except Exception as e:
+         raise HTTPException(
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+             detail=str(e)
+         )
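
A minimal client sketch for the /query endpoint above, assuming the container is running locally on port 7860 and the same API_KEY value is exported on the client side; the document URL and question are placeholders:

import os
import httpx

payload = {
    "documents": "https://example.com/sample-policy.pdf",  # placeholder URL
    "questions": ["What is the waiting period for pre-existing conditions?"],
}

response = httpx.post(
    "http://localhost:7860/query",
    json=payload,
    headers={"Authorization": f"Bearer {os.environ['API_KEY']}"},
    timeout=120.0,  # index rebuild plus generation can take a while
)
response.raise_for_status()
print(response.json()["answers"])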
pinecone_embeddings.py ADDED
@@ -0,0 +1,153 @@
+ from pinecone import Pinecone, ServerlessSpec
+ from pinecone_text.sparse import BM25Encoder  # For BM25 sparse vectors
+ import numpy as np
+ import hashlib
+ from typing import List, Dict
+ import time
+
+ from sentence_transformers import SentenceTransformer
+
+ # The Pinecone client itself is created once in app.py and passed in;
+ # this module only manages the index and its vectors.
+
+
+ def get_index(pinecone: Pinecone, index_name: str):
+     # 1. Delete the existing index if present, so every run starts clean
+     if index_name in pinecone.list_indexes().names():
+         print(f"Deleting existing index: {index_name}")
+         pinecone.delete_index(index_name)
+         print(f"Index {index_name} deleted.")
+     else:
+         print(f"Index {index_name} does not exist, no deletion necessary.")
+
+     # 2. Create a fresh serverless index. Dense vectors are computed locally
+     # with sentence-transformers, so the dimension must match that model.
+     print(f"Creating new index: {index_name}")
+     pinecone.create_index(
+         name=index_name,
+         metric="cosine",
+         dimension=768,  # BAAI/bge-base-en-v1.5 produces 768-dim embeddings
+         spec=ServerlessSpec(cloud="aws", region="us-east-1")
+     )
+     print(f"Index {index_name} created.")
+
+     # 3. Wait for the index to be ready
+     while not pinecone.describe_index(index_name).status['ready']:
+         print("Waiting for index to be ready...")
+         time.sleep(5)
+     print(f"Index {index_name} is now ready.")
+
+     return pinecone.Index(index_name)
+
+ # class EmbeddingEngine:
+ #     def __init__(self, model_name: str = 'BAAI/bge-base-en-v1.5'):
+ #         # Kept for reference: local embedding generation outside the store.
+ #         self.model = SentenceTransformer(model_name)
+ #         self.model.max_seq_length = 512
+
+ #     def encode(self, texts: List[str], batch_size: int = 32) -> np.ndarray:
+ #         prefixed_texts = [
+ #             f"query: {text}" if "query:" not in text.lower() else text
+ #             for text in texts
+ #         ]
+ #         return self.model.encode(prefixed_texts, batch_size=batch_size, convert_to_numpy=True)
+
+ class PineconeVectorStore:
+     def __init__(self, index_name: str, pinecone: Pinecone, dimension: int = 768):  # matches BAAI/bge-base-en-v1.5
+         self.index_name = index_name
+         self.dimension = dimension
+
+         self.index = get_index(pinecone, index_name)
+
+         # Load the dense embedding model once; reloading it per call is expensive
+         self.model = SentenceTransformer('BAAI/bge-base-en-v1.5')
+
+         # Initialize BM25 encoder for sparse vectors
+         self.bm25_encoder = BM25Encoder()
+
+         # Fit the BM25 encoder on a representative corpus of your data;
+         # this is crucial for BM25's effectiveness. Here we fit on a tiny
+         # sample. In a real deployment, fit it on your own document chunks.
+         print("Fitting BM25Encoder...")
+         sample_corpus = [
+             "This is a document about machine learning.",
+             "Another document discussing natural language processing.",
+             "A third document focused on artificial intelligence applications."
+         ]
+         self.bm25_encoder.fit(sample_corpus)
+         print("BM25Encoder fitted.")
+
+     def overwrite_vectors(self, document_chunks, pdf_filename: str, pinecone: Pinecone):
+         """
+         Completely replaces all vectors in the index with new data from a PDF.
+         Dense vectors are computed locally with sentence-transformers; BM25
+         sparse vectors are computed but not upserted yet.
+         """
+         # The "query: " prefix mirrors retrieve_chunks() so documents and
+         # queries are embedded consistently (bge models do not require it).
+         inputs = [f"query: {chunk['text']}" for chunk in document_chunks]
+
+         # Alternative: Pinecone's hosted embedding model
+         # embeddings = pinecone.inference.embed(
+         #     model='llama-text-embed-v2',
+         #     inputs=inputs,
+         #     parameters={"input_type": "passage", "truncate": "END"}
+         # )
+
+         embeddings = self.model.encode(inputs, batch_size=32, convert_to_numpy=True).tolist()
+
+         records_to_upsert = []
+         for i, chunk in enumerate(document_chunks):
+             # Deterministic ID: the same file + text always maps to the same vector
+             doc_id = hashlib.md5(f"{pdf_filename}-{chunk['text']}".encode('utf-8')).hexdigest()
+             sparse_vector = self.bm25_encoder.encode_documents(chunk["text"])
+
+             records_to_upsert.append({
+                 "id": doc_id,
+                 "values": embeddings[i],
+                 # "sparse_values": sparse_vector,  # enable for hybrid search
+                 "metadata": {
+                     "text": chunk['text'],
+                     "header": chunk['header'],
+                     "page": chunk['page'],
+                     "type": chunk['type']
+                 }
+             })
+
+         batch_size = 100
+         for i in range(0, len(records_to_upsert), batch_size):
+             batch = records_to_upsert[i:i + batch_size]
+             self.index.upsert(vectors=batch)
+         print(f"Successfully uploaded {len(records_to_upsert)} chunks from {pdf_filename} to Pinecone.")
+
+     def retrieve_chunks(self, query_text: str, pinecone: Pinecone, top_k: int = 5):
+         """
+         Retrieves the top-k chunks for a query. Currently dense-only; the
+         sparse vector is computed but commented out of the query below.
+         """
+         # Sparse BM25 vector for the query (for hybrid search)
+         sparse_query_vector = self.bm25_encoder.encode_queries(query_text)
+
+         query_embedding = self.model.encode(f"query: {query_text}", convert_to_numpy=True).tolist()
+
+         query_results = self.index.query(
+             vector=query_embedding,
+             # sparse_vector=sparse_query_vector,  # include for hybrid search
+             top_k=top_k,
+             include_metadata=True,
+             include_values=False  # the vectors themselves are not needed for RAG
+         )
+
+         retrieved_chunks = []
+         for match in query_results['matches']:
+             retrieved_chunks.append({
+                 "id": match['id'],
+                 "score": match['score'],
+                 "text": match['metadata']['text'],
+                 "header": match['metadata']['header'],
+                 "page": match['metadata']['page'],
+                 "type": match['metadata']['type']
+             })
+         return retrieved_chunks
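
For reference, the store expects each chunk to be a dict with "text", "header", "page", and "type" keys (the shape produced by semantic_aware.load_document). A standalone usage sketch, assuming PINECONE_API_KEY is set; the index name and chunk contents are illustrative, and note that constructing the store recreates the index:

import os
from pinecone import Pinecone
from pinecone_embeddings import PineconeVectorStore

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
store = PineconeVectorStore(index_name="policy-documents", pinecone=pc)

chunks = [
    {"text": "Claims must be filed within 30 days.", "header": "SECTION 4", "page": 2, "type": "clause"},
    {"text": "A grace period of 15 days applies to renewal premiums.", "header": "SECTION 5", "page": 3, "type": "clause"},
]
store.overwrite_vectors(chunks, "toy.pdf", pc)

for hit in store.retrieve_chunks("How long is the grace period?", pc, top_k=1):
    print(hit["score"], hit["text"])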
requirements.txt ADDED
@@ -0,0 +1,19 @@
+ fastapi
+ uvicorn
+ httpx
+ requests
+ pydantic
+ python-multipart
+ pdfplumber
+ PyMuPDF
+ sentence-transformers
+ faiss-cpu
+ huggingface-hub
+ python-dotenv
+ google-genai
+ rank-bm25
+ regex
+ python-docx
+ langchain
+ pinecone
+ pinecone-text
semantic_aware.py ADDED
@@ -0,0 +1,202 @@
+ import os
+ import re
+ import time
+ import requests
+ import fitz  # PyMuPDF
+ from docx import Document
+ from typing import List, Dict
+ from urllib.parse import urlparse
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+ class DocumentLoader:
+     def __init__(self, source: str):
+         self.source = source
+         self._validate_source()
+
+     def _validate_source(self):
+         if self.source.startswith("http"):
+             try:
+                 head = requests.head(self.source, timeout=5, allow_redirects=True)
+                 head.raise_for_status()
+             except Exception as e:
+                 raise ValueError(f"URL inaccessible: {str(e)}")
+         elif not os.path.exists(self.source):
+             raise FileNotFoundError(f"Local file not found: {self.source}")
+
+     def extract(self) -> List[Dict]:
+         raise NotImplementedError("Subclasses must implement extract()")
+
+ class PDFLoader(DocumentLoader):
+     def __init__(self, source: str):
+         super().__init__(source)
+         self.is_url = self.source.startswith("http")
+         # Matches clause-style headers such as "§", "3.2", "ARTICLE", "SECTION"
+         self.header_pattern = re.compile(
+             r"^(§|\d+\.\d+|\bARTICLE\b|\bCLAUSE\b|\bSECTION\b)",
+             re.IGNORECASE | re.MULTILINE
+         )
+
+     def _download_pdf(self) -> str:
+         local_path = "temp_blob.pdf"
+         try:
+             response = requests.get(self.source, timeout=10, stream=True)
+             response.raise_for_status()
+             with open(local_path, 'wb') as f:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     f.write(chunk)
+             return local_path
+         except Exception as e:
+             if os.path.exists(local_path):
+                 os.remove(local_path)
+             raise RuntimeError(f"Download failed: {str(e)}")
+
+     def _extract_with_structure(self, pdf_path: str) -> str:
+         full_text = []
+         try:
+             with fitz.open(pdf_path) as doc:
+                 for page_num, page in enumerate(doc, start=1):
+                     text = page.get_text("text", flags=fitz.TEXT_PRESERVE_LIGATURES)
+                     # Page markers let _chunk_text recover page numbers later
+                     page_header = f"\n[PAGE {page_num}]\n"
+                     full_text.append(page_header + text)
+         except Exception as e:
+             raise RuntimeError(f"PDF parsing failed: {str(e)}")
+         return "\n".join(full_text)
+
+     def _chunk_text(self, text: str) -> List[Dict]:
+         splitter = RecursiveCharacterTextSplitter(
+             chunk_size=1000,
+             chunk_overlap=200,
+             separators=[
+                 "\n§", "\nArticle", "\nClause", "\nSECTION",
+                 "\nSubsection", "\n\n", "\n", " ", ""
+             ]
+         )
+
+         chunks = splitter.split_text(text)
+         structured_chunks = []
+
+         for chunk in chunks:
+             header_match = self.header_pattern.search(chunk)
+             header = header_match.group(0).strip() if header_match else "General"
+             page_match = re.search(r"\[PAGE (\d+)\]", chunk)
+             page = int(page_match.group(1)) if page_match else 1
+
+             structured_chunks.append({
+                 "text": chunk,
+                 "header": header,
+                 "page": page,
+                 "type": "clause" if header_match else "text_block"
+             })
+
+         return structured_chunks
+
+     def _extract_tables(self, pdf_path: str) -> List[Dict]:
+         tables = []
+         try:
+             with fitz.open(pdf_path) as doc:
+                 for page_num, page in enumerate(doc, start=1):
+                     for table in page.find_tables().tables:
+                         table_data = []
+                         for row in table.extract():
+                             # Filter None values and convert cells to strings
+                             cleaned_row = [str(cell) if cell is not None else "" for cell in row]
+                             table_data.append("|".join(cleaned_row))
+
+                         if table_data:  # Only add non-empty tables
+                             tables.append({
+                                 "text": f"[TABLE {page_num}.{len(tables)+1}]\n" + "\n".join(table_data),
+                                 "header": f"Table {page_num}.{len(tables)+1}",
+                                 "page": page_num,
+                                 "type": "table"
+                             })
+         except Exception as e:
+             print(f"Table extraction warning: {str(e)}")
+         return tables
+
+     def extract(self) -> List[Dict]:
+         pdf_path = self._download_pdf() if self.is_url else self.source
+         try:
+             full_text = self._extract_with_structure(pdf_path)
+             chunks = self._chunk_text(full_text)
+             tables = self._extract_tables(pdf_path)
+             return chunks + tables
+         finally:
+             if self.is_url and os.path.exists(pdf_path):
+                 try:
+                     os.remove(pdf_path)
+                 except PermissionError:
+                     # Handle transient file locks on Windows
+                     time.sleep(0.1)
+                     os.remove(pdf_path)
+
+ class DOCXLoader(DocumentLoader):
+     def extract(self) -> List[Dict]:
+         chunks = []
+         current_header = None
+
+         try:
+             doc = Document(self.source)
+             for para in doc.paragraphs:
+                 text = para.text.strip()
+                 if not text:
+                     continue
+
+                 if para.style.name.lower().startswith(('heading', 'title')):
+                     current_header = text
+                     continue
+
+                 chunks.append({
+                     "text": text,
+                     "header": current_header or "General",
+                     "page": 1,  # DOCX has no page mapping; placeholder keeps the chunk schema uniform
+                     "style": para.style.name,
+                     "type": "paragraph"
+                 })
+         except Exception as e:
+             raise RuntimeError(f"DOCX processing failed: {str(e)}")
+
+         return chunks
+
+ def load_document(source: str) -> Dict:
+     def _is_pdf(content: bytes) -> bool:
+         return content[:4] == b'%PDF'
+
+     def _is_docx(content: bytes) -> bool:
+         return (b'word/_rels' in content or
+                 b'[Content_Types].xml' in content)
+
+     try:
+         # Content-based detection from the first KB of the file
+         if source.startswith("http"):
+             response = requests.get(source, stream=True, timeout=10)
+             response.raise_for_status()
+             sample = response.raw.read(1024)
+         else:
+             with open(source, 'rb') as f:
+                 sample = f.read(1024)
+
+         if _is_pdf(sample):
+             loader = PDFLoader(source)
+         elif _is_docx(sample):
+             loader = DOCXLoader(source)
+         else:
+             raise ValueError("Unrecognized file format")
+
+     except Exception:
+         # Extension-based fallback
+         ext = os.path.splitext(urlparse(source).path if source.startswith("http") else source)[1].lower()
+         if ext == '.pdf':
+             loader = PDFLoader(source)
+         elif ext == '.docx':
+             loader = DOCXLoader(source)
+         else:
+             raise ValueError(f"Unsupported file type (extension: {ext})")
+
+     return {
+         "source": source,
+         "chunks": loader.extract()
+     }
+
+ if __name__ == '__main__':
+     output = load_document('https://hackrx.blob.core.windows.net/assets/hackrx_6/policies/CHOTGDP23004V012223.pdf?sv=2023-01-03&st=2025-07-30T06%3A46%3A49Z&se=2025-09-01T06%3A46%3A00Z&sr=c&sp=rl&sig=9szykRKdGYj0BVm1skP%2BX8N9%2FRENEn2k7MQPUp33jyQ%3D')
+     print(len(output['chunks']))