# NOTE: The original scrape of this file carried Hugging Face Spaces page
# chrome here (run status, file size, commit hashes, gutter line numbers).
# That residue was not part of the program source and has been removed so
# the module parses as valid Python.
# Bootstrap: install/upgrade runtime dependencies at startup (a common
# Hugging Face Space pattern when requirements.txt is not used).
import subprocess
import sys

# Invoke pip through the current interpreter ("-m pip") so the packages are
# installed into the environment actually running this script, not whatever
# "pip" happens to be first on PATH.
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "gradio"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "transformers", "python-dotenv"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "openai"])

import gradio as gr
# NOTE(review): TFAutoModelForCausalLM/AutoTokenizer, openai, load_dotenv and
# os are only referenced by the disabled legacy code later in this file; they
# are kept so re-enabling that code keeps working.
from transformers import TFAutoModelForCausalLM, AutoTokenizer
import openai
from dotenv import load_dotenv
import os
# NOTE(review): the triple-quoted block below is the disabled ChatSherman-2.0
# OpenAI streaming chatbot, kept for reference only. At runtime it is just an
# unused module-level string expression — nothing in it executes. It depends
# on an OPENAI_API_KEY env var and the legacy openai.ChatCompletion API
# (openai<1.0) — confirm both before re-enabling.
'''
load_dotenv() # load environment variables from .env file
api_key = os.getenv("OPENAI_API_KEY") # access the value of the OPENAI_API_KEY environment variable
def predict(message, history):
prompt = "I'm an AI chatbot named ChatSherman designed by a super-intelligent student named ShermanAI at the Department of Electronic and Information Engineering at The Hong Kong Polytechnic University to help you with your engineering questions. Also, I can assist with a wide range of topics and questions. I am now version 2.0, which is more powerful than version 1.0, able to do more complex tasks, and optimized for chat. "
history = [(prompt, '')] + history
history_openai_format = []
for human, assistant in history:
history_openai_format.append({"role": "user", "content": human })
history_openai_format.append({"role": "assistant", "content": assistant})
history_openai_format.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo-16k-0613', #gpt-3.5-turbo-0301 faster
messages= history_openai_format,
temperature=0.5,
stream=True
)
partial_message = ""
for chunk in response:
if len(chunk['choices'][0]['delta']) != 0:
partial_message = partial_message + chunk['choices'][0]['delta']['content']
yield partial_message
title = "ChatSherman-2.0"
description = "Due to the unavailability of an OpenAI key, this chatbot is currently not operational. I apologize for any inconvenience caused. However, you may try using ChatSherman-1.0 at https://huggingface.co/spaces/ShermanAI/ChatSherman for a similar conversational experience. Thank you for your understanding"#"This is an AI chatbot powered by ShermanAI. Enter your question below to get started. "
examples = [
["What is ChatSherman, and how does it work?", []],
["Is my personal information and data safe when I use the ChatSherman chatbot?", []],
["What are some common applications of deep learning in engineering?", []]
]
gr.ChatInterface(predict, title=title, description=description, examples=examples).queue().launch(debug=True)
'''
# Static UI text for the placeholder Space.
title = "ChatSherman-2.0"

# Assemble the out-of-service notice from its sentences; the joined result is
# byte-identical to the original implicitly-concatenated literal.
_notice_sentences = (
    "Due to the unavailability of an OpenAI key, this chatbot is currently not operational. ",
    "However, you may try using ChatSherman-1.0 at ",
    "https://huggingface.co/spaces/ShermanAI/ChatSherman for a similar conversational experience. ",
    "Thank you for your understanding",
)
description = "".join(_notice_sentences)
def show_message():
    """Return the static out-of-service notice shown in the Space UI."""
    notice = description
    return notice
# Placeholder UI: a no-input Interface whose only action is to display the
# static notice. The stray trailing "|" that the web scrape appended to the
# launch line (a SyntaxError in Python) has been removed.
iface = gr.Interface(fn=show_message, inputs=[], outputs="text", title=title, description=description)
iface.launch(debug=True)