import sys
import subprocess

# Install dependencies quietly; openai is pinned below 1.0 because the legacy Completion API is used below
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "openai<1", "gradio", "python-dotenv"])

import gradio as gr
import openai
from dotenv import load_dotenv
import os

load_dotenv()  # load environment variables from a .env file in the working directory
openai.api_key = os.getenv("OPENAI_API_KEY")  # configure the OpenAI client with the key from the environment
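# Expected .env contents (illustrative placeholder, not a real credential):
#   OPENAI_API_KEY=sk-...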

def openai_chat(prompt):
    topics = ["engineering", "science", "math", "IT", "history", "English"]
    prompt = f"Hello! My name is ChatSherman and I'm an AI chatbot designed to help you with your questions in {', '.join(topics)}. {prompt}"
    completions = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=1024,
        n=1,
        temperature=0.5,
    )
    message = completions.choices[0].text
    return message.strip()

def chatbot(question):
    response = openai_chat(question)
    return response

title = "ChatSherman - AI Chatbot"
description = "This is an AI chatbot powered by ShermanAI"
examples = [
    ["What is the Pythagorean theorem?"],
    ["Can you explain the concept of electrical conductivity?"],
    ["Who was the first president of the United States?"],
]
inputs = gr.Textbox(label="Enter your question:")
outputs = gr.Textbox(label="ChatSherman's Response:")

interface = gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples)
interface.launch(debug=True)
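# launch(debug=True) runs the app in the foreground and prints errors to the console;
# by default Gradio serves the interface locally at http://127.0.0.1:7860.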