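"""
Gradio chatbot demo built on facebook/blenderbot-400M-distill.

Chinese input is detected and translated to English (liam168/trans-opus-mt-zh-en)
before generation; the reply is translated back (liam168/trans-opus-mt-en-zh) and
converted to traditional characters with HanziConv. Replies are post-processed with
emojis and chat short forms before being appended to the chat history.
"""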
import gradio as gr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
def translate(text, mode):
    # Load the requested MarianMT translation model and tokenizer on demand.
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
    if mode == "ztoe":
        model_name = 'liam168/trans-opus-mt-zh-en'
        trans_model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        trans_tokenizer = AutoTokenizer.from_pretrained(model_name)
        translation = pipeline("translation_zh_to_en", model=trans_model, tokenizer=trans_tokenizer)
    elif mode == "etoz":
        model_name = 'liam168/trans-opus-mt-en-zh'
        trans_model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        trans_tokenizer = AutoTokenizer.from_pretrained(model_name)
        translation = pipeline("translation_en_to_zh", model=trans_model, tokenizer=trans_tokenizer)
    else:
        return text
    # The pipeline returns a list like [{'translation_text': '...'}]; return just the string.
    translate_result = translation(text, max_length=400)
    return translate_result[0]['translation_text']
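# Example (exact wording depends on the translation model):
#   translate("你好", "ztoe")  ->  "Hello."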
chat_history = []  # (user message, bot response) pairs shown in the gr.Chatbot output
def add_emoji(response):
    # Define the keywords and their corresponding emojis
    keyword_emoji_dict = {
        "happy": "😊",
        "sad": "😢",
        "sorry": "😔",
        "love": "❤️",
        "like": "👍",
        "dislike": "👎",
        "Why": "🥺",
        "cat": "🐱",
        "dog": "🐶",
        "嗨": "😊"
    }
    # Append the matching emoji after each keyword found in the response
    for keyword, emoji in keyword_emoji_dict.items():
        response = response.replace(keyword, f"{keyword} {emoji}")
    return response
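# For example, add_emoji("I love my cat") returns "I love ❤️ my cat 🐱".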
def add_shortform(response):
    # Define the keywords and their corresponding short forms
    keyword_shortform_dict = {
        "You only live once": "YOLO",
        "funny": "LOL",
        "laugh": "LOL",
        "nevermind": "nvm",
        "sorry": "sorryyyyy",
        "tell me": "LMK",
        "By the way": "BTW",
        "don't know": "DK",
        "do not know": "IDK"
    }
    # Replace each keyword found in the response with its short form
    for keyword, st in keyword_shortform_dict.items():
        response = response.replace(keyword, st)
    return response
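# For example, add_shortform("By the way, that was funny") returns "BTW, that was LOL".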
def chatbot(text, name):
    global chat_history
    global Itext
    global bname
    # Fall back to a default name if the user did not name the bot
    if name == '':
        name = "your chatbot"
    bname = name
    Itext = text
    # Detect whether the input contains Chinese characters (CJK Unified Ideographs)
    is_chinese = any(0x4e00 <= ord(char) <= 0x9fff for char in text)
    # If it does, translate the input to English before feeding it to BlenderBot
    if is_chinese:
        text = translate(text, "ztoe")
    # Canned responses for a few common inputs
    keyword_responses = {
        "how are you": "I'm doing well😊, thank you for asking!",
        "bye": "Goodbye!👋🏻",
        "thank you": "You're welcome!😊",
        "hello": f'I am {bname}. Nice to meet you!😊',
        "Hello": f'I am {bname}. Nice to meet you!😊',
        "Hi": f'I am {bname}. Nice to meet you!😊',
        "hi": f'I am {bname}. Nice to meet you!😊',
    }
    # Generate a response based on the previous messages
    if len(chat_history) > 0:
        # Get the last bot message from the chat history
        last_message = chat_history[-1][1]
        # Condition BlenderBot on the last message plus the new user input
        encoded_input = tokenizer.encode(last_message + tokenizer.eos_token + text, return_tensors='pt')
        generated = model.generate(encoded_input, max_length=1024, do_sample=True)
        response = tokenizer.decode(generated[0], skip_special_tokens=True)
    else:
        # If there is no previous message, generate a response from the input alone
        encoded_input = tokenizer(text, return_tensors='pt')
        generated = model.generate(**encoded_input)
        response = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
    # Override the generated reply with a canned response if the input matches a keyword
    if text in keyword_responses:
        response = keyword_responses[text]
    # If the input language was Chinese, translate the response back to Chinese
    if is_chinese:
        from hanziconv import HanziConv
        response = translate(response, "etoz")
        # Convert the simplified-Chinese output to traditional characters
        response = HanziConv.toTraditional(response)
    # Add emojis and chat short forms to the response
    response = add_emoji(response)
    response = add_shortform(response)
    chat_history.append((Itext, response))
    # Return the full history as (user message, bot response) pairs for the gr.Chatbot output
    return chat_history
# Build the Gradio interface: a message box and a box to name the bot
iface = gr.Interface(fn=chatbot,
                     inputs=[gr.Textbox(label="Chat", placeholder="Say something"),
                             gr.Textbox(label="Name the Bot", placeholder="give me a name")],
                     outputs=[gr.Chatbot(label="Chat Here")],
                     title="Emphatic Chatbot",
                     allow_flagging="never",
                     theme='gstaff/xkcd',
                     examples=[["再見", ""], ["Hello", ""]]
                     )
# iface.launch(share=True)  # uncomment to expose a public share link
iface.launch()
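# Running the script (e.g. `python app.py`) starts a local Gradio server,
# which serves the UI at http://127.0.0.1:7860 by default.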