# Gradio chat demo for the OpenAI Chat Completions API (streaming).
import json

import gradio as gr
import requests
# OpenAI Chat Completions endpoint used by make_request().
API_URL = "https://api.openai.com/v1/chat/completions"
def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=None, history=None):
    """Run one chat turn: build messages, call the OpenAI API, stream the reply.

    Args:
        inputs: The new user message.
        top_p: Nucleus-sampling parameter forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        openai_api_key: API key used as the Bearer token.
        chat_counter: Number of previous turns; 0 means a fresh conversation.
        chatbot: Prior (user, assistant) pairs from the Gradio chatbot.
        history: Flat list of alternating user/assistant strings.

    Returns:
        (chat, history, token_counter) matching the Gradio output components.
    """
    # Mutable default arguments ([]) are shared across calls; use None sentinels.
    chatbot = [] if chatbot is None else chatbot
    history = [] if history is None else history

    messages = format_messages(chatbot, inputs, chat_counter)
    payload = create_payload(messages, top_p, temperature)
    response = make_request(API_URL, openai_api_key, payload)

    # Consume the streamed response.
    chat, new_history, token_counter = process_response(response, history)

    # Only commit the updated history if at least one token arrived.
    if token_counter > 0:
        history = new_history
    return chat, history, token_counter
def format_messages(chatbot, inputs, chat_counter):
    """Convert Gradio chat history plus the new input into OpenAI messages.

    Args:
        chatbot: List of (user_text, assistant_text) pairs.
        inputs: The new user message to append last.
        chat_counter: 0 means a fresh conversation, so prior pairs are skipped.

    Returns:
        List of {"role": ..., "content": ...} dicts in conversation order.
    """
    messages = []
    if chat_counter != 0:
        # Unpack each prior turn directly instead of indexing by position.
        for user_text, assistant_text in chatbot:
            messages.append({"role": "user", "content": user_text})
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": inputs})
    return messages
def create_payload(messages, top_p, temperature):
    """Build the JSON body for a streaming Chat Completions request.

    Args:
        messages: Conversation as a list of role/content dicts.
        top_p: Nucleus-sampling parameter.
        temperature: Sampling temperature.

    Returns:
        Dict ready to be sent as the request's JSON body ("stream" is True
        so the API answers with server-sent events).
    """
    return {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
def make_request(url, api_key, payload, timeout=None):
    """POST the payload to the API with streaming enabled.

    Args:
        url: Endpoint URL.
        api_key: OpenAI API key, sent as a Bearer token.
        payload: JSON-serializable request body.
        timeout: Optional requests timeout (seconds or (connect, read) tuple).
            Defaults to None, preserving the historical wait-forever behavior;
            callers can now pass a value to avoid hanging indefinitely.

    Returns:
        The live requests.Response with the stream still open.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    return requests.post(url, headers=headers, json=payload, stream=True, timeout=timeout)
def process_response(response, history): | |
token_counter = 0 | |
partial_words = "" | |
for chunk in response.iter_lines(): | |
if chunk: | |
chunk_str = chunk.decode('utf-8').lstrip('data: ') | |
if "[DONE]" in chunk_str: | |
break | |
try: | |
chunk_json = json.loads(chunk_str) | |
if 'choices' in chunk_json and len(chunk_json['choices']) > 0: | |
chunk_data = chunk_json['choices'][0].get('delta', {}) | |
if 'content' in chunk_data: | |
content = chunk_data['content'] | |
partial_words += content | |
if token_counter == 0: | |
history.append(" " + partial_words) | |
else: | |
history[-1] = partial_words | |
token_counter += 1 | |
except json.JSONDecodeError as e: | |
print("Error decoding JSON response:", e) | |
print("Raw chunk:", chunk_str) | |
chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)] | |
return chat, history, token_counter | |
def setup_ui():
    """Build the Gradio Blocks UI and wire both triggers to predict().

    Returns:
        The assembled gr.Blocks demo (not yet launched).
    """
    with gr.Blocks() as demo:
        with gr.Column():
            openai_api_key = gr.Textbox(type='password', label="Insira sua chave de API OpenAI aqui")
            chatbot = gr.Chatbot()
            inputs = gr.Textbox(placeholder="Olá!", label="Digite uma entrada e pressione Enter", lines=3)
            state = gr.State([])
            b1 = gr.Button(value="Executar", variant="primary")
            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p")
            temperature = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Temperature")
            # Hidden counter distinguishing a fresh conversation (0) from follow-ups.
            chat_counter = gr.Number(value=0, visible=False)

            # Enter in the textbox and the button both trigger the same prediction.
            predict_inputs = [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state]
            predict_outputs = [chatbot, state, chat_counter]
            inputs.submit(predict, predict_inputs, predict_outputs)
            b1.click(predict, predict_inputs, predict_outputs)
    return demo
def main():
    """Entry point: build the UI and launch the Gradio server."""
    demo = setup_ui()
    demo.launch()


if __name__ == "__main__":
    main()