import gradio as gr
import requests
import numpy as np
import pandas as pd

title = "Murder on Horsea Island Prototype with Sentence Similarity (Paraphrase XLM-R multilingual)🔪 (WORK IN PROGRESS)" |
|
description = "Prototype of the Unity Game (to test the questions)." |
|
article = """ |
|
""" |
|
theme="huggingface" |
|
# Hugging Face Inference API endpoint for the sentence-similarity model.
SS_API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/paraphrase-xlm-r-multilingual-v1"
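
# Note (assumption, not part of the original prototype): anonymous calls to the
# public Inference API can be rate-limited. If that becomes a problem, a token
# can be sent with each request, e.g. via a hypothetical HF_TOKEN environment
# variable:
#
#   import os
#   headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
#   requests.post(SS_API_URL, headers=headers, json=payload)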


def build_initial_questions_and_answers():
    """Load each character's question/answer pairs from its CSV file."""
    eleanor_df = pd.read_csv("eleanor.csv", delimiter=",")
    eleanor_questions = eleanor_df["Questions"].tolist()
    eleanor_answers = eleanor_df["Answers"].tolist()

    tom_df = pd.read_csv("tom.csv", delimiter=",")
    tom_questions = tom_df["Questions"].tolist()
    tom_answers = tom_df["Answers"].tolist()

    charles_df = pd.read_csv("charles.csv", delimiter=",")
    charles_questions = charles_df["Questions"].tolist()
    charles_answers = charles_df["Answers"].tolist()

    return eleanor_questions, eleanor_answers, tom_questions, tom_answers, charles_questions, charles_answers
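
# The three CSVs loaded above are assumed to share the same minimal layout: a
# "Questions" column and an "Answers" column, one row per question/answer pair
# (the example row below is illustrative, not taken from the real files):
#
#   Questions,Answers
#   Where were you last night?,I was in the library all evening.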


def build_json(message, questions):
    # Payload format used by the Inference API sentence-similarity pipeline:
    # one source sentence compared against a list of candidate sentences.
    payload = {
        "inputs": {
            "source_sentence": message,
            "sentences": questions,
        },
    }
    return payload


def query(payload):
    response = requests.post(SS_API_URL, json=payload)
    return response.json()
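
# On success the sentence-similarity endpoint returns a flat list of scores,
# one per candidate sentence (e.g. [0.12, 0.87, 0.33]); failures (such as the
# model still loading) come back as a JSON object with an "error" field, which
# this prototype does not handle.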


def answer(output_json, character):
    # Pick the stored answer whose question scored most similar to the player's message.
    idx = np.argmax(output_json)

    if character == "Eleanor":
        answer_ = eleanor_answers[idx]
    elif character == "Tom":
        answer_ = tom_answers[idx]
    else:
        answer_ = charles_answers[idx]

    return answer_


def chat(message, character):
    history = gr.get_state() or []

    # Anything other than Eleanor or Tom falls through to Charles (The Butler).
    if character == "Eleanor":
        payload = build_json(message, eleanor_questions)
    elif character == "Tom":
        payload = build_json(message, tom_questions)
    else:
        payload = build_json(message, charles_questions)

    output = query(payload)
    answer_ = answer(output, character)

    history.append((message, answer_))
    gr.set_state(history)

    # Render the conversation as chat bubbles styled by the CSS passed to the Interface.
    html = "<div class='chatbox'>"
    for user_msg, resp_msg in history:
        html += f"<div class='user_msg'>{user_msg}</div>"
        html += f"<div class='resp_msg'>{resp_msg}</div>"
    html += "</div>"
    return html


eleanor_questions, eleanor_answers, tom_questions, tom_answers, charles_questions, charles_answers = build_initial_questions_and_answers()

choices = ["Eleanor", "Tom", "Charles (The Butler)"]
character = gr.inputs.Radio(choices, type="value", default=None, label=None)
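
# With type="value", the radio passes the selected label string ("Eleanor",
# "Tom" or "Charles (The Butler)") straight into chat(), which is why the
# comparisons above match on those labels.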

iface = gr.Interface(
    chat,
    ["text", character],
    "html",
    title=title,
    description=description,
    article=article,
    theme=theme,
    css="""
    .chatbox {display:flex;flex-direction:column}
    .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
    .user_msg {background-color:cornflowerblue;color:white;align-self:start}
    .resp_msg {background-color:lightgray;align-self:self-end}
    """,
    allow_screenshot=False,
    allow_flagging=False,
)

if __name__ == "__main__":
    iface.launch()