Srinivasulu kethanaboina committed on
Commit 0863426 · verified · 1 Parent(s): fcda2bc

Update app.py

Files changed (1): app.py +90 -25
app.py CHANGED
@@ -1,35 +1,100 @@
- import gradio as gr
- from fastapi import FastAPI, Request, Depends, HTTPException
  from fastapi.staticfiles import StaticFiles
- import uvicorn
-
- # Initialize FastAPI app
  app = FastAPI()
-
- # Define a function for the chat interface
- def respond_to_chat(history, message):
-     response = f"Hello, {message}!"
-     return response, history
-
- # Create Gradio ChatInterface
- chat = gr.ChatInterface(fn=respond_to_chat, title="Chat with AI")
-
- # Mount static files
- app.mount("/static", StaticFiles(directory="static", html=True), name="static")
-
- # Function to authenticate users
- def authenticate(username: str, password: str):
-     if username == "your_username" and password == "your_password":
-         return True
-     return False
-
- @app.get("/gradio")
- async def gradio_access(username: str, password: str):
-     if authenticate(username, password):
-         return gr.Interface.load("https://srinukethanaboina-srunu.hf.space/gradio")  # Adjust URL if needed
      else:
-         raise HTTPException(status_code=401, detail="Unauthorized")
-
- # Run the app with uvicorn
- if __name__ == "__main__":
-     uvicorn.run(app, host="0.0.0.0", port=7860)
 
+ from fastapi import FastAPI
+ from fastapi.responses import HTMLResponse
  from fastapi.staticfiles import StaticFiles
+ from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
+ from llama_index.llms.huggingface import HuggingFaceInferenceAPI
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ import os
+ import datetime
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()

  app = FastAPI()

+ # Serve static files (HTML, CSS, JS)
+ app.mount("/static", StaticFiles(directory="static"), name="static")
+
+ # Configure Llama index settings
+ Settings.llm = HuggingFaceInferenceAPI(
+     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
+     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
+     context_window=3000,
+     token=os.getenv("HF_TOKEN"),
+     max_new_tokens=512,
+     generate_kwargs={"temperature": 0.1},
+ )
+ Settings.embed_model = HuggingFaceEmbedding(
+     model_name="BAAI/bge-small-en-v1.5"
+ )
+
+ PERSIST_DIR = "db"
+ PDF_DIRECTORY = "data"
+
+ # Ensure directories exist
+ os.makedirs(PDF_DIRECTORY, exist_ok=True)
+ os.makedirs(PERSIST_DIR, exist_ok=True)
+
+ def data_ingestion_from_directory():
+     documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
+     index = VectorStoreIndex.from_documents(documents)
+     index.storage_context.persist(persist_dir=PERSIST_DIR)
+
+ data_ingestion_from_directory()  # Process PDF ingestion at startup

+ # Store chat history in-memory (you could also use a database)
+ chat_history = []

+ def handle_query(query):
+     # Use chat history to enhance the response; build it before the
+     # prompt so it is defined when interpolated into the f-string below
+     context_str = "\n".join(
+         f"User asked: '{msg['message']}'\nBot answered: '{msg['response']}'"
+         for msg in chat_history
+     )
+
+     chat_text_qa_msgs = [
+         (
+             "user",
+             f"""
+             You are the Clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. Give a response within 10-15 words only.
+             {context_str}
+             Question:
+             {query}
+             """
+         )
+     ]
+     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)

+     # Load index from storage
+     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
+     index = load_index_from_storage(storage_context)

+     # The chat-history context is already interpolated into the template
+     query_engine = index.as_query_engine(text_qa_template=text_qa_template)
+     answer = query_engine.query(query)
+
+     if hasattr(answer, 'response'):
+         return answer.response
+     elif isinstance(answer, dict) and 'response' in answer:
+         return answer['response']
      else:
+         return "Sorry, I couldn't find an answer."
+
+ @app.get("/", response_class=HTMLResponse)
+ async def read_root():
+     with open("static/index.html") as f:
+         return f.read()
+
+ @app.post("/chat/")
+ async def chat(message: str):
+     response = handle_query(message)
+
+     message_data = {
+         "sender": "User",
+         "message": message,
+         "response": response,
+         "timestamp": datetime.datetime.now().isoformat()
+     }
+
+     # Store the interaction in chat history
+     chat_history.append(message_data)
+
+     return {"response": response}