tanveeshsingh committed
Commit • 60c488a
1 Parent(s): 4b83871
Changes in app.py
app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
 from collinear import Collinear
 import os
 import json
+from openai import AsyncOpenAI
+from jinja2 import Template
 collinear = Collinear(access_token=os.getenv('COLLINEAR_API_KEY'))
 
 def update_inputs(input_style):
@@ -13,6 +15,53 @@ def update_inputs(input_style):
     return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
 
 
+async def lynx(input_style_dropdown,document_input,question_input,answer_input):
+    if input_style_dropdown=='QA format':
+        client = AsyncOpenAI(
+            base_url="https://s6mipt5j797e6fql.us-east-1.aws.endpoints.huggingface.cloud/v1/",
+            api_key=os.getenv("HF_TOKEN")
+        )
+        PROMPT = Template("""
+        Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
+
+        --
+        QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
+        {question}
+
+        --
+        DOCUMENT:
+        {context}
+
+        --
+        ANSWER:
+        {answer}
+
+        --
+
+        Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
+        {{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
+        """)
+        chat_completion = await client.chat.completions.create(
+            model="tgi",
+            messages=[
+                {
+                    "role": "user",
+                    "content": PROMPT.render(question=question_input,context=document_input,answer=answer_input)
+                }
+            ],
+            top_p=None,
+            temperature=None,
+            max_tokens=150,
+            stream=False,
+            seed=None,
+            frequency_penalty=None,
+            presence_penalty=None
+        )
+        print(chat_completion)
+        return chat_completion.choices.pop().message.content
+    else:
+        return 'NA'
+
 # Function to judge reliability based on the selected input format
 async def judge_reliability(input_style, document, conversation, claim, question, answer):
     if input_style == "Conv":
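The new lynx judge prompts the hosted endpoint to reply with a JSON object carrying "REASONING" and "SCORE" ("PASS"/"FAIL"), but the handler returns the raw message content straight to the output textbox. If the verdict ever needs to be consumed programmatically rather than just displayed, a small best-effort parser along these lines could sit between the endpoint and the UI (a hypothetical helper sketched here, not part of this commit; it assumes the model usually emits well-formed JSON):

import json

def parse_lynx_verdict(raw: str) -> dict:
    # Hypothetical helper (not in the commit): best-effort extraction of the
    # {"REASONING": ..., "SCORE": ...} object the prompt asks the model for.
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Fall back to the first {...} span if the model wrapped the JSON in prose.
        start, end = raw.find("{"), raw.rfind("}")
        if start != -1 and end > start:
            try:
                return json.loads(raw[start:end + 1])
            except json.JSONDecodeError:
                pass
    return {"REASONING": None, "SCORE": None}

# e.g. parse_lynx_verdict('{"REASONING": ["ok"], "SCORE": "PASS"}')["SCORE"] == "PASS"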
@@ -41,7 +90,9 @@ with gr.Blocks() as demo:
     answer_input = gr.Textbox(label="Answer", lines=5, visible=False, value="CV was born in Iowa")
 
     with gr.Row():
-        result_output = gr.Textbox(label="
+        result_output = gr.Textbox(label="Veritas Model")
+
+        lynx_output = gr.Textbox(label="Lynx Model")
 
 
     # Set the visibility of inputs based on the selected input style
@@ -56,6 +107,10 @@ with gr.Blocks() as demo:
         fn=judge_reliability,
         inputs=[input_style_dropdown, document_input, conversation_input, claim_input, question_input, answer_input],
         outputs=result_output
+    ).then(
+        fn=lynx,
+        inputs=[input_style_dropdown,document_input,question_input,answer_input],
+        outputs=lynx_output
     )
 
     # Launch the demo
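The UI wiring relies on Gradio's event chaining: the existing click handler that runs judge_reliability returns an event reference whose .then() schedules lynx after the first call finishes, writing into the separate lynx_output box. A minimal standalone sketch of the same pattern, with placeholder handlers standing in for the two judges (names here are illustrative, not taken from the Space):

import gradio as gr

def first_step(text):
    # Placeholder for the Veritas-style judgement
    return f"veritas: {text}"

def second_step(text):
    # Placeholder for the Lynx-style judgement, run after first_step completes
    return f"lynx: {text}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out_a = gr.Textbox(label="Veritas Model")
    out_b = gr.Textbox(label="Lynx Model")
    btn = gr.Button("Evaluate")

    # .click() returns an event reference; .then() chains a second callback
    # that fires only after the first one has finished.
    btn.click(fn=first_step, inputs=inp, outputs=out_a).then(
        fn=second_step, inputs=inp, outputs=out_b
    )

if __name__ == "__main__":
    demo.launch()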