# Hotel chatbot web app: FastAPI + Gemini (RAG over data/hotel.pdf) with
# reply translation via deep-translator.  Deployed as a Hugging Face Space.
import datetime
import json
import os
import re
import time
import urllib
import uuid  # for generating unique IDs
import warnings
from pathlib import Path as p
from pprint import pprint

import pandas as pd
from deep_translator import GoogleTranslator
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
# restart python kernel if issues with langchain import.
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from pydantic import BaseModel
# Gemini API key read from the environment (stored under HF_TOKEN on Spaces).
GOOGLE_API_KEY = os.getenv("HF_TOKEN")
warnings.filterwarnings("ignore")

# Chat model used to answer questions over the retrieved context.
model = ChatGoogleGenerativeAI(
    model="gemini-pro",
    google_api_key=GOOGLE_API_KEY,
    temperature=0.2,  # low temperature -> mostly deterministic answers
    convert_system_message_to_human=True,
)

# Build the retrieval index once at startup from the hotel PDF.
pdf_loader = PyPDFLoader("data/hotel.pdf")
pages = pdf_loader.load_and_split()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
# Loop variable renamed from `p`, which shadowed `from pathlib import Path as p`.
context = "\n\n".join(str(page.page_content) for page in pages)
texts = text_splitter.split_text(context)
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=GOOGLE_API_KEY)
# Retriever returns the 10 most similar chunks for each query.
vector_index = Chroma.from_texts(texts, embeddings).as_retriever(search_kwargs={"k": 10})
qa_chain = RetrievalQA.from_chain_type(
    model,
    retriever=vector_index,
    return_source_documents=True,
)
class MessageRequest(BaseModel):
    """Request body for the chat endpoint."""
    message: str   # user's chat message
    language: str  # language tag, e.g. "en-US"; only the part before '-' is used
app = FastAPI()

# Register the middleware with the app.  In the original, the function below
# was defined but never attached, so it ran for no request — the decorator is
# required for the headers to be applied.
@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    """Add headers that permit this app to be embedded in iframes anywhere.

    NOTE(review): `frame-ancestors *` deliberately allows world-wide embedding,
    and "ALLOWALL" is not a standard X-Frame-Options value (browsers ignore it,
    which effectively allows framing) — confirm this openness is intended.
    """
    response = await call_next(request)
    response.headers["Content-Security-Policy"] = "frame-ancestors *; frame-src *; object-src *;"
    response.headers["X-Frame-Options"] = "ALLOWALL"
    return response
# Fully permissive CORS: any origin, method, and header is accepted.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm credentials are
# actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
async def favicon():
    """Return an empty page in place of a real favicon.

    NOTE(review): no route decorator is visible here — presumably
    @app.get("/favicon.ico") was lost in extraction; confirm.
    """
    empty_body = HTMLResponse("")  # or serve a real favicon if you have one
    return empty_body
# Serve static assets and load Jinja2 templates from the same "static" directory.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="static")
async def load_chat(request: Request, id: str):
    """Render the chat page (index.html) for the given user id.

    NOTE(review): no route decorator is visible — likely an @app.get route
    with an {id} path parameter was lost in extraction; confirm.
    """
    template_vars = {"request": request, "user_id": id}
    return templates.TemplateResponse("index.html", template_vars)
async def chat(request: MessageRequest):
    """Answer a chat message via the RAG chain, translated to the caller's language.

    NOTE(review): no route decorator is visible — likely @app.post was lost in
    extraction; confirm.

    Returns a dict {"response": <answer text>}.
    """
    message = request.message  # Access the message from the request body
    language = request.language
    # "en-US" -> "en": deep-translator expects a bare language code.
    language_code = language.split('-')[0]

    response = qa_chain({"query": message})
    response1 = response['result']  # answer text produced by the chain

    # Translate the English answer into the requested language.  Two fixes vs.
    # the original: skip the pointless en->en round trip, and on failure keep
    # the untranslated answer instead of replacing it with a generic apology
    # (which discarded the answer entirely).
    if language_code and language_code != 'en':
        try:
            translator = GoogleTranslator(source='en', target=language_code)
            response1 = translator.translate(response1)
            print(response1)
        except Exception as e:
            print(f"Translation error: {e}")

    print(f"Selected Language: {language}")
    return {"response": response1}
def read_root(request: Request):
    """Render the landing page (home.html).

    NOTE(review): no route decorator is visible — presumably @app.get("/")
    was lost in extraction; confirm.
    """
    page_vars = {"request": request}
    return templates.TemplateResponse("home.html", page_vars)