Update app.py
app.py (CHANGED)
```diff
@@ -1,161 +1,138 @@
-# [old lines 1-2 removed: two lines, including an import; content not preserved in this view]
+# main.py
+from fastapi import FastAPI, Depends, HTTPException, BackgroundTasks
+from fastapi.middleware.cors import CORSMiddleware
 import gradio as gr
-# [old lines 4-103 removed: model/tokenizer setup, product search, and the body of
-#  construct_prompt; content not preserved in this view]
-        prompt += f"<|start_header_id|>assistant<|end_header_id|>\n\n{assistant_msg}<|eot_id|>"
-
-    print(prompt)
-    return prompt
-
-def chat_with_model(user_input, chat_history=[]):
-    # Search for relevant products
-    search_results = search_products(user_input)
-
-    # Create context with search results
-    if search_results:
-        context = "Product Context:\n"
-        for product in search_results:
-            context += f"Produkt ID: {product['ID']}\n"
-            context += f"Name: {product['Name']}\n"
-            context += f"Beschreibung: {product['Description']}\n"
-            context += f"Preis: {product['Price']}€\n"
-            context += f"Bewertung: {product['Rating']} ({product['RatingCount']} Bewertungen)\n"
-            context += f"Kategorie: {product['ProductCategory']}\n"
-            context += f"Marke: {product['Brand']}\n"
-            context += "---\n"
-    else:
-        context = "Das weiß ich nicht."
-    print("context: ------------------------------------- \n"+context)
-    # Pass both user_input and context to construct_prompt
-    prompt = construct_prompt(user_input, context, chat_history)  # This line is changed
-    print("prompt: ------------------------------------- \n"+prompt)
-    input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=4096).to("cpu")
-    tokenizer.pad_token = tokenizer.eos_token
-    attention_mask = torch.ones_like(input_ids).to("cpu")
-    outputs = model.generate(input_ids, attention_mask=attention_mask,
-                             max_new_tokens=1200, do_sample=True,
-                             top_k=50, temperature=0.7)
-    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
-    print("respone: ------------------------------------- \n"+response)
-    chat_history.append((context, response))  # or chat_history.append((user_input, response)) if you want to store user input
-    return response, chat_history
-
-#####
-###
-###
-# Gradio Interface
-def gradio_interface(user_input, history):
-    response, updated_history = chat_with_model(user_input, history)
-    return response, updated_history
-
-with gr.Blocks() as demo:
-    gr.Markdown("# 🦙 Llama Instruct Chat with ChromaDB Integration")
-    with gr.Row():
-        user_input = gr.Textbox(label="Your Message", lines=2, placeholder="Type your message here...")
-        submit_btn = gr.Button("Send")
-    chat_history = gr.State([])
-    chat_display = gr.Textbox(label="Chat Response", lines=10, placeholder="Chat history will appear here...", interactive=False)
-    submit_btn.click(gradio_interface, inputs=[user_input, chat_history], outputs=[chat_display, chat_history])
-
-demo.launch(debug=True)
+from services.chat_service import ChatService
+from services.data_service import DataService
+from services.faq_service import FAQService
+from services.model_service import ModelService  # import was missing: ModelService is used below; module path assumed
+from auth.auth_handler import get_api_key
+from models.base_models import UserInput, SearchQuery
+import logging
+import asyncio
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.FileHandler('chatbot.log'),
+        logging.StreamHandler()
+    ]
+)
+logger = logging.getLogger(__name__)
+
+# Initialize FastAPI app
+app = FastAPI(title="Bofrost Chat API", version="2.0.0")
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Initialize services
+model_service = ModelService()
+data_service = DataService(model_service)
+faq_service = FAQService(model_service)
+chat_service = ChatService(model_service, data_service, faq_service)
+
+# API endpoints
+@app.post("/api/chat")
+async def chat_endpoint(
+    user_input: UserInput,
+    background_tasks: BackgroundTasks,  # moved before the defaulted parameter: a non-default argument after a default one is a SyntaxError
+    api_key: str = Depends(get_api_key),
+):
+    try:
+        response, updated_history, search_results = await chat_service.chat(
+            user_input.user_input,
+            user_input.chat_history
+        )
+        return {
+            "status": "success",
+            "response": response,
+            "chat_history": updated_history,
+            "search_results": search_results
+        }
+    except Exception as e:
+        logger.error(f"Error in chat endpoint: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/api/search")
+async def search_endpoint(
+    query: SearchQuery,
+    api_key: str = Depends(get_api_key)
+):
+    try:
+        results = await data_service.search(query.query, query.top_k)
+        return {"results": results}
+    except Exception as e:
+        logger.error(f"Error in search endpoint: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/api/faq/search")
+async def faq_search_endpoint(
+    query: SearchQuery,
+    api_key: str = Depends(get_api_key)
+):
+    try:
+        results = await faq_service.search_faqs(query.query, query.top_k)
+        return {"results": results}
+    except Exception as e:
+        logger.error(f"Error in FAQ search endpoint: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+# Gradio interface
+def create_gradio_interface():
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown("# 🦙 Bofrost Chat Assistant\nFragen Sie nach Produkten, Rezepten und mehr!")
+
+        with gr.Row():
+            with gr.Column(scale=4):
+                chat_display = gr.Chatbot(label="Chat-Verlauf", height=400)
+                user_input = gr.Textbox(
+                    label="Ihre Nachricht",
+                    placeholder="Stellen Sie Ihre Frage...",
+                    lines=2
+                )
+
+            with gr.Column(scale=2):
+                with gr.Accordion("Zusätzliche Informationen", open=False):
+                    product_info = gr.JSON(label="Produktdetails")
+
+        with gr.Row():
+            submit_btn = gr.Button("Senden", variant="primary")
+            clear_btn = gr.Button("Chat löschen")
+
+        chat_history = gr.State([])
+
+        async def respond(message, history):
+            response, updated_history, search_results = await chat_service.chat(message, history)
+            # gr.Chatbot renders the full list of (user, bot) pairs, so feed it
+            # updated_history rather than the bare response string
+            return updated_history, updated_history, search_results
+
+        submit_btn.click(
+            respond,
+            inputs=[user_input, chat_history],
+            outputs=[chat_display, chat_history, product_info]
+        )
+
+        clear_btn.click(
+            lambda: ([], [], None),
+            outputs=[chat_display, chat_history, product_info]
+        )
+
+    demo.queue()
+    return demo
+
+if __name__ == "__main__":
+    import uvicorn
 
+    # Create and launch Gradio interface
+    # prevent_thread_lock=True added: launch() otherwise blocks, and the
+    # uvicorn.run() call below is never reached
+    demo = create_gradio_interface()
+    demo.launch(server_name="0.0.0.0", server_port=7860, prevent_thread_lock=True)
 
+    # Start FastAPI server
+    uvicorn.run(app, host="0.0.0.0", port=8000)
```
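The new main.py imports UserInput and SearchQuery from models.base_models, a module this commit does not include. Judging from how the endpoints use them (user_input.user_input, user_input.chat_history, query.query, query.top_k), the models might look roughly like the sketch below; the field names come from the endpoint code, while the types and defaults are assumptions:

```python
# models/base_models.py -- hypothetical sketch; only the field names are
# grounded in the endpoint code above, the types and defaults are guesses.
from typing import List, Tuple
from pydantic import BaseModel, Field

class UserInput(BaseModel):
    user_input: str                               # the user's message
    chat_history: List[Tuple[str, str]] = Field(  # (user, assistant) pairs
        default_factory=list
    )

class SearchQuery(BaseModel):
    query: str      # free-text search string
    top_k: int = 5  # number of results to return (default is assumed)
```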
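auth.auth_handler.get_api_key is also outside this diff. A common FastAPI pattern for such a dependency is a header-based key check; everything below (the X-API-Key header name, the environment-variable lookup) is an assumption, not the repository's actual handler:

```python
# auth/auth_handler.py -- hypothetical sketch of a header-based API-key check.
import os

from fastapi import HTTPException, Security
from fastapi.security import APIKeyHeader

# Header name is an assumption; the real handler may use a different scheme.
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

async def get_api_key(api_key: str = Security(api_key_header)) -> str:
    # Compare against a key from the environment; reject anything else.
    if api_key and api_key == os.environ.get("API_KEY"):
        return api_key
    raise HTTPException(status_code=403, detail="Invalid or missing API key")
```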
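Assuming the sketches above, a quick smoke test against the running API could look like this; the header name and the example key are placeholders:

```python
# Hypothetical smoke test against the running API (port 8000 as configured above).
import requests

resp = requests.post(
    "http://localhost:8000/api/chat",
    headers={"X-API-Key": "your-key-here"},  # header name assumed, see sketch above
    json={"user_input": "Welche Tiefkühlpizza können Sie empfehlen?",
          "chat_history": []},
)
print(resp.json()["response"])
```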
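A design note on the __main__ block: even with prevent_thread_lock=True, the script runs two servers on two ports (7860 for Gradio, 8000 for FastAPI). If a single process and port are preferred, Gradio's mount_gradio_app helper can serve the UI from the FastAPI app; the /ui path below is an arbitrary choice:

```python
# Alternative: serve the Gradio UI from the FastAPI app on one port.
# gr.mount_gradio_app is part of Gradio's public API; /ui is our choice of path.
if __name__ == "__main__":
    import uvicorn

    demo = create_gradio_interface()
    app = gr.mount_gradio_app(app, demo, path="/ui")

    # One server: API under /api/*, chat UI under /ui
    uvicorn.run(app, host="0.0.0.0", port=8000)
```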