import gradio as gr
from openai import OpenAI
import os
import json
css = '''
.gradio-container {max-width: 1000px !important}
h1 {text-align: center}
footer {
    visibility: hidden
}
'''
# Access token for Hugging Face
ACCESS_TOKEN = os.getenv("HF_TOKEN")
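# The token is read from the environment, so it must be set before launch,
# e.g. `export HF_TOKEN=hf_xxxxxxxx` (placeholder value, not a real token)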
# Initialize an OpenAI-compatible client pointed at the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)
# File path for storing user preferences
USER_DATA_PATH = "user_data.json"
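# The stored file is a flat JSON object; illustrative contents:
# {"name": "Ada", "favorite_destination": "Lisbon", "budget": "$2,000"}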
# Load user preferences if they exist
def load_user_preferences():
    if os.path.exists(USER_DATA_PATH):
        with open(USER_DATA_PATH, "r") as file:
            return json.load(file)
    return {}
# Save user preferences
def save_user_preferences(data):
    with open(USER_DATA_PATH, "w") as file:
        json.dump(data, file)
# Respond function that generates the assistant's reply
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Load stored user preferences
    user_data = load_user_preferences()

    # Save preferences stated in the message and confirm them
    if message.lower().startswith("my name is"):
        # Slice off the prefix instead of splitting on "is", which would
        # also match an "is" inside the name itself (e.g. "Chris")
        user_data["name"] = message[len("my name is"):].strip()
        save_user_preferences(user_data)
        response = f"Nice to meet you, {user_data['name']}! How can I assist you with your travel plans today?"
        yield response
        return
if message.lower().startswith("i like to travel to"):
user_data["favorite_destination"] = message.split("to")[-1].strip()
save_user_preferences(user_data)
response = f"Got it! I noted that you enjoy traveling to {user_data['favorite_destination']}."
yield response
return
if message.lower().startswith("my budget is"):
user_data["budget"] = message.split("is")[-1].strip()
save_user_preferences(user_data)
response = f"Understood! I'll keep your budget of {user_data['budget']} in mind when suggesting travel options."
yield response
return
    # Use the stored name and preferences in responses when available
    name = user_data.get("name", "Traveler")
    favorite_destination = user_data.get("favorite_destination", "various places")
    budget = user_data.get("budget", "not specified")

    # Rebuild the conversation history for the chat-completions API
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    # Add the current user message
    messages.append({"role": "user", "content": message})
response = f"Hello {name}! You mentioned you like traveling to {favorite_destination}. Let's plan something exciting within your budget of {budget}.\n"
# Generate a response using the OpenAI client
for message in client.chat.completions.create(
model="meta-llama/Meta-Llama-3.1-8B-Instruct",
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
messages=messages,
):
token = message.choices[0].delta.content
response += token
yield response
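# Quick local sanity check, bypassing the UI (hypothetical invocation; the
# "my name is" branch replies without calling the API):
# for partial in respond("My name is Ada", [], "You are a travel assistant.", 64, 0.7, 0.95):
#     print(partial)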
# Gradio interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly travel assistant. Offer personalized travel tips and remember user preferences.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-P",
        ),
    ],
    css=css,
    theme="allenai/gradio-theme",
)
if __name__ == "__main__":
    demo.launch()
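# To share a temporary public link when running locally, you could use
# demo.launch(share=True) instead (share is a standard Gradio launch flag).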