import subprocess

# Install runtime dependencies quietly before importing them.
subprocess.check_call(["pip", "install", "-q", "openai"])
subprocess.check_call(["pip", "install", "-q", "gradio", "transformers", "python-dotenv"])

import gradio as gr
from transformers import TFAutoModelForCausalLM, AutoTokenizer  # imported but not used below
import openai
from dotenv import load_dotenv
import os
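# NOTE (assumption): load_dotenv() below expects a .env file next to app.py with the
# API key in KEY=VALUE form, e.g.
#
#     OPENAI_API_KEY=<your-key-here>
#
# The placeholder value is illustrative; the .env file itself is not part of this repo.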
load_dotenv()  # load environment variables from the .env file
api_key = os.getenv("OPENAI_API_KEY")  # read the value of the OPENAI_API_KEY environment variable
openai.api_key = api_key  # authenticate the openai client with the loaded key
def openai_chat(prompt):
    # Query the completions endpoint (pre-1.0 openai SDK interface).
    completions = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=1024, n=1, temperature=0.5)
    message = completions.choices[0].text
    return message.strip()
def chatbot(talk_to_chatsherman, history=[]):
    # Get the model's reply and append the (user message, reply) pair to the chat state.
    output = openai_chat(talk_to_chatsherman)
    history.append((talk_to_chatsherman, output))
    return history, history
title = "My Chatbot Title"
description = "This is an AI chatbot powered by ShermanAI using the GPT-3 model."
examples = [
    ["Hello, how are you?", []],
    ["What's the meaning of life?", []],
    ["Tell me a joke.", []],
]
# gr.inputs.* and the "chatbot"/"state" string shortcuts follow the legacy Gradio interface API.
inputs = [gr.inputs.Textbox(label="Talk to ChatSherman: "), "state"]
outputs = ["chatbot", "state"]
interface = gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples)
interface.launch(debug=True)
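# --- Optional sanity check (illustrative sketch, not part of the original app) ---
# With OPENAI_API_KEY available, the chat logic can be exercised without the UI:
#
#     history, _ = chatbot("Hello, how are you?")
#     print(history)  # [("Hello, how are you?", "<model reply>")]
#
# Note that interface.launch(debug=True) above blocks, so run a check like this
# before launching or in a separate interpreter session.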