ambrosfitz committed on
Commit
0d8c0a7
·
verified ·
1 Parent(s): a52aae3

Delete app.py.old

Browse files
Files changed (1) hide show
  1. app.py.old +0 -174
app.py.old DELETED
@@ -1,174 +0,0 @@
1
import os
import gradio as gr
import requests
import json
import logging
from dotenv import load_dotenv

# Load environment variables from a local .env file (if present).
load_dotenv()

# API Keys configuration — read from the environment, never hard-coded.
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")

# Fail fast at import time: the app cannot serve requests without both keys.
if not COHERE_API_KEY or not MISTRAL_API_KEY:
    raise ValueError("Missing required API keys in environment variables")

# API endpoints configuration
COHERE_API_URL = "https://api.cohere.ai/v1/chat"
MISTRAL_API_URL = "https://api.mistral.ai/v1/chat/completions"
VECTOR_API_URL = "https://sendthat.cc"  # external vector-search service
HISTORY_INDEX = "onramps"  # index name queried on that service

# Model configurations, keyed by the provider label shown in the UI radio.
# Each entry bundles the model id, chat endpoint and credential so the
# generate_answer_* helpers can look everything up by provider name.
MODELS = {
    "Cohere": {
        "name": "command-r-08-2024",
        "api_url": COHERE_API_URL,
        "api_key": COHERE_API_KEY
    },
    "Mistral": {
        # Fine-tuned open-mistral-nemo model id.
        "name": "ft:open-mistral-nemo:ef730d29:20241022:2a0e7d46",
        "api_url": MISTRAL_API_URL,
        "api_key": MISTRAL_API_KEY
    }
}
37
-
38
def search_document(query, k):
    """Query the vector-search service for the top-k passages matching *query*.

    Args:
        query: Free-text search string.
        k: Number of results to request.

    Returns:
        tuple: ``(results, echoed_query, k)``. On success ``results`` is the
        decoded JSON response and ``echoed_query`` is ``""``; on failure
        ``results`` is ``{"error": message}`` and the original query is
        echoed back so the caller can retry.
    """
    try:
        url = f"{VECTOR_API_URL}/search/{HISTORY_INDEX}"
        payload = {"text": query, "k": k}
        headers = {"Content-Type": "application/json"}
        # Bound the request so a hung vector service cannot stall the app
        # forever (requests has no default timeout).
        response = requests.post(url, json=payload, headers=headers, timeout=30)
        response.raise_for_status()
        return response.json(), "", k
    except requests.exceptions.RequestException as e:
        logging.error(f"Error in search: {e}")
        return {"error": str(e)}, query, k
49
-
50
def generate_answer_cohere(question, context, citations):
    """Answer *question* from *context* via the Cohere chat API.

    Args:
        question: The user's question.
        context: Retrieved passages, concatenated into one string.
        citations: Source labels appended to the answer as [1], [2], ...

    Returns:
        str: The model's answer followed by a numbered "Sources:" list, or a
        human-readable error message if the request or response parsing fails.
    """
    headers = {
        "Authorization": f"Bearer {MODELS['Cohere']['api_key']}",
        "Content-Type": "application/json"
    }

    prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer the question based on the given context. Include citations as [1], [2], etc.:"

    payload = {
        "message": prompt,
        "model": MODELS['Cohere']['name'],
        "preamble": "You are an AI-assistant chatbot. Provide thorough responses with citations.",
        "chat_history": []
    }

    try:
        # Timeout keeps a slow/hung API from blocking the Gradio worker.
        response = requests.post(MODELS['Cohere']['api_url'], headers=headers,
                                 json=payload, timeout=60)
        response.raise_for_status()
        answer = response.json()['text']
    except requests.exceptions.RequestException as e:
        logging.error(f"Error in generate_answer_cohere: {e}")
        return f"An error occurred: {str(e)}"
    except (KeyError, ValueError) as e:
        # A 2xx reply with an unexpected body (missing 'text', non-JSON)
        # previously escaped the handler and crashed the chat callback.
        logging.error(f"Error in generate_answer_cohere: {e}")
        return f"An error occurred: {str(e)}"

    answer += "\n\nSources:"
    for i, citation in enumerate(citations, 1):
        answer += f"\n[{i}] {citation}"

    return answer
78
-
79
def generate_answer_mistral(question, context, citations):
    """Answer *question* from *context* via the Mistral chat-completions API.

    Args:
        question: The user's question.
        context: Retrieved passages, concatenated into one string.
        citations: Source labels appended to the answer as [1], [2], ...

    Returns:
        str: The model's answer followed by a numbered "Sources:" list, or a
        human-readable error message if the request or response parsing fails.
    """
    headers = {
        "Authorization": f"Bearer {MODELS['Mistral']['api_key']}",
        "Content-Type": "application/json",
        "Accept": "application/json"
    }

    prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer the question based on the given context. Include citations as [1], [2], etc.:"

    payload = {
        "model": MODELS['Mistral']['name'],
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ]
    }

    try:
        # Timeout keeps a slow/hung API from blocking the Gradio worker.
        response = requests.post(MODELS['Mistral']['api_url'], headers=headers,
                                 json=payload, timeout=60)
        response.raise_for_status()
        answer = response.json()['choices'][0]['message']['content']
    except requests.exceptions.RequestException as e:
        logging.error(f"Error in generate_answer_mistral: {e}")
        return f"An error occurred: {str(e)}"
    except (KeyError, IndexError, ValueError) as e:
        # A 2xx reply with an unexpected body (missing keys, empty 'choices',
        # non-JSON) previously escaped the handler and crashed the callback.
        logging.error(f"Error in generate_answer_mistral: {e}")
        return f"An error occurred: {str(e)}"

    answer += "\n\nSources:"
    for i, citation in enumerate(citations, 1):
        answer += f"\n[{i}] {citation}"

    return answer
111
-
112
def answer_question(question, model_choice, k=3):
    """Run retrieval-augmented QA: fetch context, then generate an answer.

    Args:
        question: The user's question.
        model_choice: "Cohere" to use the Cohere backend; anything else
            falls through to Mistral.
        k: Number of passages to retrieve from the vector index.

    Returns:
        str: The generated answer (with sources) or an error message.
    """
    # Search the vector database for supporting passages.
    search_results, _, _ = search_document(question, k)

    contexts = []
    citations = []
    if "results" in search_results:
        # Collect passage text and a human-readable citation per hit.
        for hit in search_results['results']:
            meta = hit['metadata']
            contexts.append(meta['content'])
            citations.append(f"{meta.get('title', 'Unknown Source')} - {meta.get('source', 'No source provided')}")
    else:
        # Retrieval failed or returned nothing; answer from an empty context.
        logging.error(f"Error in database search or no results found: {search_results}")
    combined_context = " ".join(contexts)

    # Dispatch to the backend selected in the UI.
    generate = generate_answer_cohere if model_choice == "Cohere" else generate_answer_mistral
    return generate(question, combined_context, citations)
134
-
135
def chatbot(message, history, model_choice):
    """Gradio chat callback: answer *message* with the chosen model.

    *history* is accepted to satisfy the ChatInterface signature but is not
    used — every turn is answered independently from fresh retrieval.
    """
    return answer_question(message, model_choice)
138
-
139
# Example questions with default model choice.
# Each entry is [question, model] because the ChatInterface examples must
# supply a value for the additional model_choice input as well.
EXAMPLE_QUESTIONS = [
    ["Why was Anne Hutchinson banished from Massachusetts?", "Cohere"],
    ["What were the major causes of World War I?", "Cohere"],
    ["Who was the first President of the United States?", "Cohere"],
    ["What was the significance of the Industrial Revolution?", "Cohere"]
]

# Create Gradio interface
with gr.Blocks(theme="soft") as iface:
    gr.Markdown("# History Chatbot")
    gr.Markdown("Ask me anything about history, and I'll provide answers with citations!")

    with gr.Row():
        # Backend selector; its value is threaded into the chat callback
        # via additional_inputs below.
        model_choice = gr.Radio(
            choices=["Cohere", "Mistral"],
            value="Cohere",
            label="Choose LLM Model",
            info="Select which AI model to use for generating responses"
        )

    chatbot_interface = gr.ChatInterface(
        fn=lambda message, history, model: chatbot(message, history, model),
        additional_inputs=[model_choice],
        chatbot=gr.Chatbot(height=300),
        textbox=gr.Textbox(placeholder="Ask a question about history...", container=False, scale=7),
        examples=EXAMPLE_QUESTIONS,  # Now properly formatted with model choice
        cache_examples=False,  # examples hit live APIs; never pre-run them
        # NOTE(review): retry_btn/undo_btn/clear_btn are Gradio 3.x kwargs
        # removed in Gradio 4+ — confirm the pinned gradio version.
        retry_btn=None,
        undo_btn="Delete Previous",
        clear_btn="Clear",
    )

# Launch the app
if __name__ == "__main__":
    iface.launch()