Update app.py
app.py CHANGED
```diff
@@ -48,25 +48,23 @@ def messages_to_history(messages: Messages) -> Tuple[str, History]:
     return system, history
 
 # Main function for chat
-import requests
-
 def model_chat(query: Optional[str], history: Optional[History], system: str) -> Tuple[str, str, History]:
     if query is None:
         query = ''
     if history is None:
         history = []
 
-    # Convert history to a
+    # Convert history to a list of messages
     messages = history_to_messages(history, system)
     messages.append({'role': 'user', 'content': query})
 
     # Prepare the payload for Hugging Face Inference API
     payload = {"inputs": query, "parameters": {"max_new_tokens": 150}, "history": messages}
-    headers = {"Authorization": f"Bearer {os.getenv('
+    headers = {"Authorization": f"Bearer {os.getenv('YOUR_API_TOKEN')}"}
 
     try:
         # Request generation with Hugging Face Inference API
-        response = requests.post(f"https://api-inference.huggingface.co/models/
+        response = requests.post(f"https://api-inference.huggingface.co/models/YourModelNameHere",
                                  json=payload, headers=headers)
 
         if response.status_code == 200:
@@ -81,19 +79,20 @@ def model_chat(query: Optional[str], history: Optional[History], system: str) -> Tuple[str, str, History]:
             # Log the chat to file
             log_history_to_file(query, response_text)
 
-            # Update history and return
+            # Update history with the new assistant response and return it
+            history.append([query, response_text])
             system, history = messages_to_history(messages + [{'role': 'assistant', 'content': response_text}])
-
+            return response_text, history, system
         else:
             # Log error message to file and display it in Gradio
             error_message = f"Error {response.status_code}: {response.json().get('error', response.text)}"
             log_history_to_file(query, error_message)
-
+            return error_message, history, system
     except Exception as e:
         # Log any unexpected exceptions
         error_message = f"Exception: {str(e)}"
         log_history_to_file(query, error_message)
-
+        return error_message, history, system
 
 
 # Gradio Interface Setup
```
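The helpers this diff calls (`history_to_messages`, `messages_to_history`, `log_history_to_file`) are defined elsewhere in app.py and are not part of the changed hunks. A minimal sketch of plausible implementations, assuming Gradio's `[user, assistant]` pair format for `History`; the names match the diff, but the bodies and the log path are assumptions:

```python
import os
from datetime import datetime
from typing import Dict, List, Tuple

History = List[Tuple[str, str]]    # [(user_msg, assistant_msg), ...] - assumed pair format
Messages = List[Dict[str, str]]    # [{'role': ..., 'content': ...}, ...]

def history_to_messages(history: History, system: str) -> Messages:
    # Flatten pair-format history into role/content messages, system prompt first
    messages = [{'role': 'system', 'content': system}]
    for user_msg, assistant_msg in history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': assistant_msg})
    return messages

def messages_to_history(messages: Messages) -> Tuple[str, History]:
    # Inverse of the above: peel off the system prompt, re-pair the rest
    system = messages[0]['content'] if messages and messages[0]['role'] == 'system' else ''
    rest = [m for m in messages if m['role'] != 'system']
    history = [(rest[i]['content'], rest[i + 1]['content'])
               for i in range(0, len(rest) - 1, 2)]
    return system, history

def log_history_to_file(query: str, response: str, path: str = 'chat_log.txt') -> None:
    # Append one exchange per call; the file name is an assumption, not from the diff
    with open(path, 'a', encoding='utf-8') as f:
        f.write(f"[{datetime.now().isoformat()}] USER: {query}\nBOT: {response}\n\n")
```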
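The lines between the two hunks (old lines 72-80, where `response_text` is extracted from a 200 response) are also not shown. For a text-generation model, the Inference API returns a JSON list of generations of the form `[{"generated_text": ...}]`, so the elided block presumably does something like this sketch (an assumption, not the commit's actual code):

```python
# Sketch only: parse the Inference API's text-generation response,
# which is typically a list like [{"generated_text": "..."}]
data = response.json()
response_text = data[0].get('generated_text', '') if isinstance(data, list) else str(data)
```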
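The diff stops at the `# Gradio Interface Setup` comment, so the UI wiring is not visible either. A minimal sketch of how `model_chat` could be hooked into `gr.Blocks`, assuming the three-value return order `(response_text, history, system)` seen in the new code; the component names and default system prompt are invented for illustration:

```python
import gradio as gr

DEFAULT_SYSTEM = "You are a helpful assistant."  # assumption; not from the diff

with gr.Blocks() as demo:
    system_state = gr.State(DEFAULT_SYSTEM)
    chatbot = gr.Chatbot()
    query_box = gr.Textbox(placeholder="Type a message...")

    def respond(query, history, system):
        # model_chat returns (response_text, history, system); the Chatbot
        # component consumes the updated pair-format history directly
        _, history, system = model_chat(query, history, system)
        return history, system, ""  # clear the textbox after sending

    query_box.submit(respond,
                     inputs=[query_box, chatbot, system_state],
                     outputs=[chatbot, system_state, query_box])

demo.launch()
```

Keeping the system prompt in `gr.State` mirrors how `model_chat` threads `system` through its return value, so a later UI element could let users edit it without restarting the Space.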