import subprocess
# Install dependencies at runtime (a common shortcut in Hugging Face Spaces demos).
# openai is pinned to the 0.x SDK, which still provides the openai.ChatCompletion API used below;
# transformers and torch are installed here but not used by the rest of this script.
subprocess.check_call(["pip", "install", "openai==0.28"])
subprocess.check_call(["pip", "install", "gradio", "transformers", "python-dotenv", "torch"])

import gradio as gr
import openai
from dotenv import load_dotenv
import os

load_dotenv()  # load environment variables from the .env file
api_key = os.getenv("OPENAI_API_KEY")  # read the OpenAI API key from the environment
openai.api_key = api_key  # the key must be assigned to the openai module before any API call
'''
# Earlier implementation, kept for reference but disabled: it prepends the persona to the
# prompt and calls the legacy Completion endpoint (text-davinci-003) instead of ChatCompletion.
def openai_chat(prompt):
    if "who are you" in prompt.lower() or "your name" in prompt.lower() or "name" in prompt.lower():
        return "My name is ChatSherman. How can I assist you today?"
    else:
        prompt = "I'm an AI chatbot named ChatSherman designed by a super intelligent student named ShermanAI at the Department of Electronic and Information Engineering at the Hong Kong Polytechnic University to help you with your engineering questions. Also, I can assist with a wide range of topics and questions." + prompt
        completions = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=1024, n=1, temperature=0.5)  # the original max_tokens=10000 exceeded the model's 4,097-token context window
        message = completions.choices[0].text
        return message.strip()
'''
def getresponse(message, history):
    # System prompt that defines the ChatSherman persona.
    system_prompt = "I'm an AI chatbot named ChatSherman designed by a super intelligent student named ShermanAI at the Department of Electronic and Information Engineering at the Hong Kong Polytechnic University to help you with your engineering questions. Also, I can assist with a wide range of topics and questions."
    messages = [{"role": "system", "content": system_prompt}]
    # Replay the conversation history so the model keeps context across turns.
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    if message != '':
        messages.append({"role": "user", "content": message})
    # ChatCompletion requires a chat model and a model= argument; text-davinci-003 only works
    # with the legacy Completion endpoint, so gpt-3.5-turbo is used here instead.
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo",
                                            messages=messages,
                                            temperature=0.5,
                                            max_tokens=1024,  # 10000 would exceed the model's context window
                                            top_p=0.95,
                                            frequency_penalty=1,
                                            presence_penalty=1,
                                            stop=None)
    return response["choices"][0]["message"]["content"]
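
# A minimal sketch of exercising getresponse directly (assumes OPENAI_API_KEY is set in the
# environment; the question is taken from the example list below, and the empty list stands
# in for an empty chat history):
#
#   reply = getresponse("What are some common applications of deep learning in engineering?", [])
#   print(reply)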
title = "ChatSherman" | |
description = "This is an AI chatbot powered by ShermanAI. Enter your question below to get started." | |
examples = [ | |
["What is ChatSherman, and how does it work?"], | |
["Is my personal information and data safe when I use the ChatSherman chatbot?"], | |
["What are some common applications of deep learning in engineering?"] | |
] | |
gr.Interface(getresponse, title=title, description=description, examples=examples).launch() # ChatInterface |
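
# A minimal sketch of the .env file that load_dotenv() reads at startup (the key value is a
# placeholder, not a real credential):
#
#   OPENAI_API_KEY=sk-your-key-here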