# Uploaded via huggingface_hub (commit 23dea16, verified) by andrewsiah
import os
from dataclasses import dataclass
from typing import Optional

import gradio as gr
import openai
@dataclass
class Args:
    """Sampling parameters forwarded to the chat-completions API.

    Fields with value ``None`` (e.g. ``stop``) are filtered out before the
    request is sent (see ``get_completion``).
    """

    frequency_penalty: float = 0
    max_tokens: int = 32           # short replies to keep the Turing-test chat snappy
    n: int = 1                     # number of choices requested per call
    presence_penalty: float = 0
    seed: int = 42                 # fixed seed for reproducible sampling
    stop: Optional[str] = None     # was annotated `str` with a None default — fixed
    stream: bool = False
    temperature: float = 0.8
    top_p: float = 0.95
def get_completion(client, model_id, messages, args):
    """Call the chat-completions endpoint and return the response, or None on failure.

    Any option whose value is None (e.g. ``args.stop``) is omitted from the
    request so the API only receives explicitly-set parameters.
    """
    params = dict(
        model=model_id,
        messages=messages,
        frequency_penalty=args.frequency_penalty,
        max_tokens=args.max_tokens,
        n=args.n,
        presence_penalty=args.presence_penalty,
        seed=args.seed,
        stop=args.stop,
        stream=args.stream,
        temperature=args.temperature,
        top_p=args.top_p,
    )
    # Strip unset (None) options before sending.
    for key in [k for k, v in params.items() if v is None]:
        del params[key]
    try:
        return client.chat.completions.create(**params)
    except Exception as e:
        # Best-effort boundary: report the failure and signal it with None.
        print(f"Error during API call: {e}")
        return None
def chat_response(message, history, model):
    """Generate one assistant reply for a chat pane.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list
        Prior (user, assistant) pairs in Gradio tuple format.
    model : str
        Pane label ("A" / "B"). NOTE(review): currently unused for model
        selection — both panes query the same ``model_id`` below.

    Returns
    -------
    str
        The assistant's reply, or an error message on API failure.
    """
    # SECURITY: hardcoded credential checked into source. Move this to an
    # environment variable / secrets manager instead of a literal.
    openai_api_key = "super-secret-token"
    os.environ['OPENAI_API_KEY'] = openai_api_key
    # Kept for backward compatibility with any code reading the module-level
    # openai attributes; the client below is what actually makes the call.
    openai.api_key = openai_api_key
    openai.api_base = "https://turingtest--example-vllm-openai-compatible-serve.modal.run/v1"
    client = openai.OpenAI(api_key=openai_api_key, base_url=openai.api_base)

    # Build the message list: system prompt, then history, then the new turn.
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:  # skip empty/None assistant slots (in-progress turn)
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Default sampling parameters.
    args = Args()
    model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"

    response = get_completion(client, model_id, messages, args)
    if response and response.choices:
        return response.choices[0].message.content
    # f-prefix removed: the string contains no placeholders.
    return "Error: Please retry or contact support if retried more than twice."
def create_chat_interface(model):
    """Build one chat pane wired to chat_response for the given model label."""

    # Each pane gets its own closure over its label ("A" or "B").
    def respond(message, history):
        return chat_response(message, history, model)

    interface_kwargs = {
        "fn": respond,
        "chatbot": gr.Chatbot(height=400, label=f"Choice {model}"),
        "textbox": gr.Textbox(placeholder="Message", container=False, scale=7),
        # title=f"Choice {model}",
        "description": "",
        "theme": "dark",
        # examples=[["what's up"]],
        # cache_examples=True,
        "retry_btn": None,
        "undo_btn": None,
        "clear_btn": None,
    }
    return gr.ChatInterface(**interface_kwargs)
# Top-level UI: two side-by-side chat panes plus shared voting and prompt controls.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", neutral_hue="slate"), head=
    """
    <style>
    body {
        font-family: 'Calibri', sans-serif; /* Choose your desired font */
    }
    </style>
    """) as demo:
    gr.Markdown("## Turing Test Prompt Competition")

    # One column per competing response ("A" vs "B").
    with gr.Row():
        with gr.Column():
            chat_a = create_chat_interface("A")
        with gr.Column():
            chat_b = create_chat_interface("B")

    # Voting buttons. NOTE(review): no click handlers are wired to these in
    # this file — votes are not recorded anywhere visible here.
    # (Emoji labels were mojibake — UTF-8 decoded with the wrong codec — and
    # have been repaired.)
    with gr.Row():
        a_better = gr.Button("👉 A is better", scale=1)
        b_better = gr.Button("👈 B is better", scale=1)
        tie = gr.Button("🤝 Tie", scale=1)
        both_bad = gr.Button("👎 Both are bad", scale=1)

    # Shared prompt box: one message fanned out to both panes.
    prompt_input = gr.Textbox(placeholder="Message for both...", container=False)
    send_btn = gr.Button("Send to Both", variant="primary")

    def send_prompt(prompt):
        """Mirror the shared prompt into both chat textboxes and clear the input."""
        # One value per output component: chat A's box, chat B's box, the input.
        return prompt, prompt, gr.update(value="")

    # BUG FIX: prompt_input appeared twice in the outputs list (and send_prompt
    # returned two clear-updates for it). A single update per component suffices;
    # duplicated output components are redundant and can trip Gradio validation.
    shared_outputs = [chat_a.textbox, chat_b.textbox, prompt_input]
    send_btn.click(send_prompt, inputs=[prompt_input], outputs=shared_outputs)
    prompt_input.submit(send_prompt, inputs=[prompt_input], outputs=shared_outputs)
# Script entry point: serve the UI. share=True asks Gradio for a public
# tunnel URL in addition to the local server.
if __name__ == "__main__":
    demo.launch(share=True)