Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,68 +2,97 @@ import gradio as gr
|
|
2 |
import os
|
3 |
import requests
|
4 |
import json
|
|
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
}
|
10 |
-
entire_assistant_response = ""
|
11 |
|
12 |
-
|
13 |
-
|
14 |
-
entire_assistant_response = "" # Reset the entire assistant response
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
"model": "NousResearch/Nous-Hermes-2-Yi-34B",
|
21 |
-
"temperature": 1.1,
|
22 |
-
"top_p": 0.9,
|
23 |
-
"top_k": 50,
|
24 |
-
"repetition_penalty": 1,
|
25 |
-
"n": 1,
|
26 |
-
"messages": all_message,
|
27 |
-
"stream_tokens": True,
|
28 |
-
}
|
29 |
-
|
30 |
-
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
|
31 |
-
headers = {
|
32 |
-
"accept": "application/json",
|
33 |
-
"content-type": "application/json",
|
34 |
-
"Authorization": f"Bearer {TOGETHER_API_KEY}",
|
35 |
-
}
|
36 |
|
37 |
-
|
38 |
-
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
-
|
41 |
-
|
42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
if decoded_line
|
52 |
-
|
|
|
|
|
53 |
chunk_data = json.loads(decoded_line)
|
54 |
content = chunk_data['choices'][0]['delta']['content']
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
print(f"Invalid JSON received: {decoded_line}")
|
60 |
-
continue
|
61 |
-
except KeyError as e:
|
62 |
-
print(f"KeyError encountered: {e}")
|
63 |
-
continue
|
64 |
-
|
65 |
-
print(entire_assistant_response)
|
66 |
-
all_message.append({"role": "assistant", "content": entire_assistant_response})
|
67 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
|
69 |
-
|
|
|
|
2 |
import os
|
3 |
import requests
|
4 |
import json
|
5 |
+
from typing import Optional
|
6 |
|
7 |
+
# Define constants
# Endpoint and model for the Together.ai chat-completions API.
TOGETHER_API_URL = "https://api.together.xyz/v1/chat/completions"
MODEL_NAME = "NousResearch/Nous-Hermes-2-Yi-34B"
# Base request headers; the per-request Authorization bearer token is
# added in get_streamed_response() via HEADERS.copy().
HEADERS = {"accept": "application/json", "content-type": "application/json"}

# Initialize message history
# NOTE(review): a module-level mutable list is shared by every chat
# session and grows without bound — confirm this is intended.
all_message = [{"role": "system", "content": "... system message ..."}]
|
|
|
14 |
|
15 |
+
def validate_input(message: str) -> bool:
    """Validate the user input before sending it to the API.

    Args:
        message: The raw user message from the chat UI.

    Returns:
        True if the message is non-empty after stripping whitespace,
        False otherwise.
    """
    # Previously an unconditional ``return True`` placeholder; reject
    # empty / whitespace-only messages so they are never sent upstream.
    return bool(message and message.strip())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
+
def get_token() -> Optional[str]:
    """Retrieve the API token from the environment variables.

    Returns:
        The value of ``TOGETHER_API_KEY``, or None (after printing a
        diagnostic) when the variable is not set.
    """
    token = os.environ.get('TOGETHER_API_KEY')
    if token is None:
        print("The TOGETHER_API_KEY environment variable is not set.")
        return None
    return token
|
27 |
|
28 |
+
def post_request(payload: dict, headers: dict, *, timeout: float = 60.0) -> Optional["requests.Response"]:
    """Send a streaming POST request to the Together API.

    Args:
        payload: JSON-serializable request body.
        headers: HTTP headers, including the Authorization bearer token.
        timeout: Connect/read timeout in seconds — without one, a hung
            connection would block forever.

    Returns:
        The open streaming Response on success, or None when the request
        failed or the server returned an error status.  (The original
        annotation claimed a bare ``requests.Response`` despite the
        ``return None`` error path.)
    """
    try:
        # stream=True keeps the connection open so the token chunks can
        # be consumed incrementally by process_stream().
        response = requests.post(
            TOGETHER_API_URL,
            json=payload,
            headers=headers,
            stream=True,
            timeout=timeout,
        )
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while making the API request: {e}")
        return None
|
37 |
|
38 |
+
def process_stream(response: "requests.Response") -> str:
    """Process the streamed SSE response from the API.

    Each payload line has the form ``data: {json}``; the sentinel
    ``data: [DONE]`` marks end of stream.

    Args:
        response: A streaming Response whose iter_lines() yields SSE
            event lines as bytes.

    Returns:
        The concatenated assistant message (partial if the stream ends
        or errors mid-way).
    """
    assistant_response = ""
    for line in response.iter_lines():
        if not line:
            continue
        decoded_line = line.decode('utf-8').strip()
        if decoded_line == "data: [DONE]":
            break
        if not decoded_line.startswith("data: "):
            continue
        try:
            chunk_data = json.loads(decoded_line.removeprefix("data: "))
            content = chunk_data['choices'][0]['delta']['content']
        except (json.JSONDecodeError, KeyError, IndexError) as e:
            # Skip only the malformed chunk; the original try/except
            # wrapped the whole loop, so one bad line aborted the
            # entire stream.
            print(f"An error occurred while processing the stream: {e}")
            continue
        # Final chunks may carry a null content field; skip those
        # instead of raising TypeError on ``+= None``.
        if content:
            assistant_response += content
    return assistant_response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
+
def get_streamed_response(message: str, history: list) -> str:
    """Main function to interact with the chat API.

    Args:
        message: The latest user message from the Gradio UI.
        history: Gradio-supplied chat history (unused — the module-level
            ``all_message`` list is the source of truth).

    Returns:
        The assistant's reply, or a human-readable error string.
    """
    # NOTE(review): the shared module-level history means all Gradio
    # sessions see one conversation — confirm this is intended.
    global all_message

    # Validate input
    if not validate_input(message):
        return "Invalid input."

    # Retrieve the API token *before* touching the history, so a
    # missing key cannot leave a dangling user turn behind.
    api_key = get_token()
    if not api_key:
        return "Unable to retrieve the API key."

    # Set up the headers with the API key
    headers = HEADERS.copy()
    headers["Authorization"] = f"Bearer {api_key}"

    # Record the user turn in the message history.
    all_message.append({"role": "user", "content": message})

    # Prepare the payload for the API request
    payload = {
        "model": MODEL_NAME,
        "temperature": 1.1,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_message,
        "stream_tokens": True,
    }

    # Send the request and process the stream
    response = post_request(payload, headers)
    if response:
        assistant_response = process_stream(response)
        all_message.append({"role": "assistant", "content": assistant_response})
        return assistant_response

    # Roll back the user turn so a failed request does not corrupt the
    # history sent with the next call (the original left it appended).
    all_message.pop()
    return "Failed to get a response from the API."
|
96 |
|
97 |
+
# Launch the Gradio interface
# Wires get_streamed_response as the chat callback and starts the app.
# NOTE(review): `retry_btn` was removed in Gradio 5.x — confirm the
# pinned gradio version still accepts it.
gr.ChatInterface(fn=get_streamed_response, title="TherapistGPT", description="...", retry_btn="Regenerate 🔁").launch()
|