# Gradio demo: ask an OpenAI chat model a question, then fact-check its answer with Factool.

import os

import gradio as gr
import openai
from factool import Factool


def chat_with_gpt(api_key, model, message):
    """Send the user's message to the selected OpenAI chat model and return its reply."""
    # Uses the pre-1.0 openai SDK interface (openai.ChatCompletion).
    openai.api_key = api_key
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": message},
        ],
    )
    return response.choices[0].message["content"]


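# Factool's knowledge-based QA (kbqa) checker roughly runs four stages on each
# prompt/response pair: claim extraction, query generation, evidence collection
# via web search, and claim verification against the collected evidence
# (summary of the workflow described in the Factool repository/paper).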
def fact_check(api_key, model, message, response):
    """Run Factool on the model's response and return the factuality report."""
    # Factool reads the OpenAI key from the environment; online kbqa search may
    # additionally require a search-engine key (e.g. SERPER_API_KEY) to be set.
    os.environ["OPENAI_API_KEY"] = api_key
    factool_instance = Factool(model)  # use the selected model as Factool's foundation model
    inputs = [
        {
            "prompt": message,
            "response": response,
            "category": "kbqa",
            "search_type": "online",
        },
    ]
    response_list = factool_instance.run(inputs)
    return response_list


def process_input(api_key, model, message):
    # Generate an answer first, then fact-check it; the app displays only the Factool report.
    response = chat_with_gpt(api_key, model, message)
    return fact_check(api_key, model, message, response)


iface = gr.Interface(fn=process_input,
                     inputs=[gr.Textbox(label="OpenAI API Key", type="password"),
                             gr.Radio(["gpt-3.5-turbo", "gpt-4"], label="Model"),
                             gr.Textbox(label="Message")],
                     outputs=gr.JSON(label="Factool result"))
iface.launch()
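# Note: by default launch() serves the demo locally (typically http://127.0.0.1:7860);
# passing share=True additionally creates a temporary public Gradio link, e.g.:
#
#     iface.launch(share=True)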