Update app.py
app.py CHANGED
@@ -4,19 +4,15 @@ import gradio as gr
 from langfuse import Langfuse
 from langfuse.decorators import observe, langfuse_context
 import os
+import faiss
+import pandas as pd
+from sentence_transformers import SentenceTransformer
+import datetime
 
 # Initialize Langfuse
-#langfuse = Langfuse(
-# secret_key="sk-lf-229e10c5-6210-4a4b-a432-0f17bc66e56c",
-# public_key="pk-lf-9f2c32d2-266f-421d-9b87-51377f0a268c",
-# host="https://chris4k-langfuse-template-space.hf.space"
-#)
-
-# Get keys for your project from the project settings page
-# https://cloud.langfuse.com
 os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-9f2c32d2-266f-421d-9b87-51377f0a268c"
 os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-229e10c5-6210-4a4b-a432-0f17bc66e56c"
-os.environ["LANGFUSE_HOST"] = "https://chris4k-langfuse-template-space.hf.space"
+os.environ["LANGFUSE_HOST"] = "https://chris4k-langfuse-template-space.hf.space"  # 🇪🇺 EU region
 
 langfuse = Langfuse()
 
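Note on the hunk above: both the public and the secret Langfuse key are committed in plain text. A minimal sketch of the safer pattern, assuming the keys are instead supplied by the deployment environment (e.g. Hugging Face Space secrets); the guard is illustrative, not part of this commit:

import os
from langfuse import Langfuse

# Expect LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY / LANGFUSE_HOST to be
# injected by the environment (e.g. Space secrets), never hardcoded in app.py.
for var in ("LANGFUSE_PUBLIC_KEY", "LANGFUSE_SECRET_KEY", "LANGFUSE_HOST"):
    if var not in os.environ:
        raise RuntimeError(f"missing required environment variable: {var}")

langfuse = Langfuse()  # picks the credentials up from the environment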
@@ -26,10 +22,6 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map=None, torch_dtype=torch.float32)
 
 # Load FAISS and Embeddings
-import faiss
-import pandas as pd
-from sentence_transformers import SentenceTransformer
-
 embedder = SentenceTransformer('distiluse-base-multilingual-cased')
 url = 'https://www.bofrost.de/datafeed/DE/products.csv'
 data = pd.read_csv(url, sep='|')
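For context, the search_products function used further down is defined outside this diff. A sketch of what the FAISS side presumably looks like, built on the embedder and product feed loaded above; the column name and top-k value are assumptions:

import faiss
import numpy as np

# Embed the product names from the feed and index them with a flat L2 index.
product_texts = data['name'].astype(str).tolist()  # assumed column name
embeddings = embedder.encode(product_texts, convert_to_numpy=True).astype(np.float32)
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

def search_products(query, k=5):
    # Embed the query and return the k nearest product names.
    query_vec = embedder.encode([query], convert_to_numpy=True).astype(np.float32)
    _, ids = index.search(query_vec, k)
    return [product_texts[i] for i in ids[0]]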
@@ -67,6 +59,22 @@ def construct_prompt(user_input, context, chat_history, max_history_turns=1):
 # Main function to interact with the model
 @observe()
 def chat_with_model(user_input, chat_history=[]):
+    # Start trace for the entire chat process
+    trace = langfuse.trace(
+        name="ai-chat-execution",
+        user_id="user_12345",
+        metadata={"email": "[email protected]"},
+        tags=["chat", "product-query"],
+        release="v1.0.0"
+    )
+
+    # Span for product search
+    retrieval_span = trace.span(
+        name="product-retrieval",
+        metadata={"source": "faiss-index"},
+        input={"query": user_input}
+    )
+
     # Search for products
     search_results = search_products(user_input)
     if search_results:
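Worth flagging in this hunk: chat_with_model is already decorated with @observe(), which creates its own trace in the Langfuse Python SDK v2, so the manual langfuse.trace(...) call starts a second, unrelated trace, and the langfuse_context updates below land on the decorator's trace rather than this one. A sketch of keeping everything on the decorator's trace instead; illustrative, not part of this commit:

from langfuse.decorators import observe, langfuse_context

@observe()  # becomes a child observation of the caller's trace
def retrieve_products(query):
    results = search_products(query)
    langfuse_context.update_current_observation(
        metadata={"source": "faiss-index"},
        output={"results": results},
    )
    return results

@observe()  # the outermost decorated call creates the trace itself
def chat_with_model(user_input, chat_history=[]):
    langfuse_context.update_current_trace(
        name="ai-chat-execution",
        user_id="user_12345",
        tags=["chat", "product-query"],
    )
    search_results = retrieve_products(user_input)
    ...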
@@ -76,18 +84,40 @@ def chat_with_model(user_input, chat_history=[]):
     else:
         context = "Das weiß ich nicht."
 
+    # End product search span with results
+    retrieval_span.end(
+        output={"search_results": search_results},
+        status_message=f"Found {len(search_results)} products"
+    )
+
+    # Update trace with search context
     langfuse_context.update_current_observation(
         input={"query": user_input},
         output={"context": context},
         metadata={"search_results_found": len(search_results)}
     )
 
-    # Generate prompt
+    # Generate prompt for Llama model
     prompt = construct_prompt(user_input, context, chat_history)
     input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=4096)
+
+    # Span for AI generation
+    generation_span = trace.span(
+        name="ai-response-generation",
+        metadata={"model": "Llama-3.2-3B-Instruct"},
+        input={"prompt": prompt}
+    )
+
     outputs = model.generate(input_ids, max_new_tokens=1200, do_sample=True, top_k=50, temperature=0.7)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # End model generation span
+    generation_span.end(
+        output={"response": response},
+        status_message="AI response generated"
+    )
 
+    # Update Langfuse context with usage details
     langfuse_context.update_current_observation(
         usage_details={
             "input_tokens": len(input_ids[0]),
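The usage block above records the input token count; since model.generate returns the prompt followed by the continuation in outputs[0], the output count can be derived by subtraction. A sketch of the full update; the output_tokens key is an assumption by analogy with input_tokens, as the middle of the dict is truncated out of this hunk:

# Derive both token counts after generation. outputs[0] holds the prompt
# plus the newly generated tokens, so the length difference is the
# number of generated tokens.
input_tokens = len(input_ids[0])
output_tokens = len(outputs[0]) - len(input_ids[0])

langfuse_context.update_current_observation(
    usage_details={
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,  # assumed key, not shown in the diff
    }
)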
@@ -95,7 +125,16 @@ def chat_with_model(user_input, chat_history=[]):
         }
     )
 
+    # Append the response to the chat history
     chat_history.append((user_input, response))
+
+    # Update trace final output
+    trace.update(
+        metadata={"final_status": "completed"},
+        output={"summary": response}
+    )
+
+    # Return the response
     return response, chat_history
 
 # Gradio interface
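Langfuse sends events asynchronously in the background, so short-lived processes can exit before the trace, spans, and updates above are delivered. A one-line sketch of the usual safeguard:

langfuse.flush()  # block until queued traces/spans/updates are sent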
@@ -104,7 +143,7 @@ def gradio_interface(user_input, history):
     return response, updated_history
 
 with gr.Blocks() as demo:
-    gr.Markdown("# 🦙 Llama Instruct Chat with LangFuse Integration")
+    gr.Markdown("# 🦙 Llama Instruct Chat with LangFuse & Faiss Integration")
     user_input = gr.Textbox(label="Your Message", lines=2)
     submit_btn = gr.Button("Send")
     chat_history = gr.State([])
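The diff ends before the Blocks wiring; a sketch of how these components are presumably connected, with the output textbox and the .click binding as assumptions not shown in this commit:

# Inside the same `with gr.Blocks() as demo:` context as the widgets above.
output_box = gr.Textbox(label="Response")

# Route the button click through gradio_interface, threading the
# chat-history State in and out.
submit_btn.click(
    fn=gradio_interface,
    inputs=[user_input, chat_history],
    outputs=[output_box, chat_history],
)

demo.launch()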