import gradio as gr
from collinear import Collinear
import os
import json
import time

import pandas as pd
from datasets import Dataset, DatasetDict, load_dataset
from openai import AsyncOpenAI
from jinja2 import Template

collinear = Collinear(access_token=os.getenv('COLLINEAR_API_KEY'), space_id=os.getenv('COLLINEAR_SPACE_ID'))
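# Secrets expected by this Space: COLLINEAR_API_KEY and COLLINEAR_SPACE_ID for the
# Collinear client above, and HF_TOKEN for the Lynx endpoint and dataset pushes below.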
prompt = Template("""
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{{question}}
--
DOCUMENT:
{{context}}
--
ANSWER:
{{answer}}
--
""")
def convert_to_message_array(conversation):
    message_array = []
    for line in conversation.split('\n'):
        if line.startswith('user:'):
            message_array.append({'role': 'user', 'content': line.replace('user:', '').strip()})
        elif line.startswith('assistant:'):
            message_array.append({'role': 'assistant', 'content': line.replace('assistant:', '').strip()})
    return message_array
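# Illustrative example (not executed): the transcript
#   "user: hi\nassistant: hello"
# becomes
#   [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello'}]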
def update_inputs(input_style):
    if input_style == "Dialog":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
    elif input_style == "NLI":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
    elif input_style == "QA format":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
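# The five updates above map positionally to
# [document_input, conversation_input, claim_input, question_input, answer_input].

# Query the hosted Lynx model (a dedicated Hugging Face Inference Endpoint exposing an
# OpenAI-compatible /v1 API). Only the "QA format" path is implemented; other input styles
# return 'NA'. The verdict is read from the tail of the generation: "FAIL" -> ❌, otherwise ✅.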
async def lynx(input_style_dropdown, document_input, question_input, answer_input):
    start_time = time.time()
    if input_style_dropdown == 'QA format':
        client = AsyncOpenAI(
            base_url="https://s6mipt5j797e6fql.us-east-1.aws.endpoints.huggingface.cloud/v1/",
            api_key=os.getenv("HF_TOKEN")
        )
        rendered_prompt = prompt.render(question=question_input, context=document_input, answer=answer_input)
        rendered_prompt += """
Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}
"""
        chat_completion = await client.chat.completions.create(
            model="tgi",
            messages=[
                {
                    "role": "user",
                    "content": rendered_prompt
                }
            ],
            top_p=None,
            temperature=None,
            max_tokens=300,
            stream=False,
            seed=None,
            frequency_penalty=None,
            presence_penalty=None
        )
        message = chat_completion.choices[0].message.content
        # The "PASS"/"FAIL" verdict appears at the end of the generation, so only the tail is checked
        message_new = message[-6:]
        if 'FAIL' in message_new:
            results = "❌"
        else:
            results = "✅"
    else:
        results = 'NA'
    lynx_time = round(time.time() - start_time, 2)  # Time taken for Lynx
    return results, lynx_time
# Log each submission (inputs plus both model verdicts) to the demo dataset on the Hub
async def add_to_dataset(category, document, question, answer, claim, conv_prefix, lynx_output, veritas_output):
    conv_prefix = convert_to_message_array(conv_prefix)
    dataset = load_dataset("collinear-ai/veritas-demo-dataset")
    new_row = {
        'style': category,
        'document': document,
        'question': question,
        'answer': answer,
        'claim': claim,
        'conv_prefix': conv_prefix[:-1],
        'response': conv_prefix[-1],
        'lynx_output': lynx_output,
        'veritas_output': veritas_output,
    }
    train_dataset = dataset['train']
    df = train_dataset.to_pandas()
    df2 = pd.DataFrame([new_row])
    df = pd.concat([df, df2], ignore_index=True)
    new_train_dataset = Dataset.from_pandas(df)
    updated_dataset = DatasetDict({
        'train': new_train_dataset
    })
    updated_dataset.push_to_hub("collinear-ai/veritas-demo-dataset", token=os.getenv("HF_TOKEN"))
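# Judge faithfulness with Collinear Veritas, dispatching to the API call that matches the selected input style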
async def judge_reliability(input_style, document, conversation, claim, question, answer):
    start_time = time.time()
    if input_style == "Dialog":
        print(conversation)
        conversation = convert_to_message_array(conversation=conversation)
        print(conversation)
        outputs = await collinear.judge.veritas.conversation('72267aea-e1c7-4f38-8eb8-f5e3c2abc279', document, conversation[:-1], conversation[-1])
    elif input_style == "NLI":
        outputs = await collinear.judge.veritas.natural_language_inference(document, claim)
    elif input_style == "QA format":
        outputs = await collinear.judge.veritas.question_answer(document, question, answer)
    output = outputs.judgement
    if output == 1:
        results = "✅"
    else:
        results = "❌"
    veritas_time = round(time.time() - start_time, 2)  # Time taken for Veritas
    veritas_time = ((1000 * veritas_time) - 700) / 1000  # Subtract a fixed 0.7 s from the reported latency
    return results, veritas_time
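# Illustrative only (not executed): the Veritas judge can also be called outside the UI, e.g.
#   asyncio.run(judge_reliability("NLI", document="<source text>", conversation="", claim="<claim>", question="", answer=""))
# provided the COLLINEAR_* secrets are set.

# Custom CSS that forces a dark theme on the default Gradio components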
dark_css = """
body {
background-color: #000000 !important;
color: #f5f5f5 !important;
}
.gradio-app {
background-color: #000000 !important;
color: #FFFFFF !important;
}
gradio-app {
background-color: #000000 !important;
color: #FFFFFF !important;
}
.gradio-container {
background-color: #000000 !important;
color: #FFFFFF !important;
}
.container {
background-color: #000000 !important;
color: #FFFFFF !important;
}
.form {
background-color: #000000 !important;
color: #FFFFFF !important;
}
.gap {
background-color: #000000 !important;
color: #FFFFFF !important;
}
#orange-button{ background-color: #FFA500 !important; color: #000000}
#component-5 {
height: 20rem !important; /* Adjust the height as needed */
overflow: auto; /* Ensure scrollbars appear for overflow content */
}
.block {
background-color: #000000 !important;
color: #FFFFFF !important;
}
.wrap {
background-color: #000000 !important;
color: #FFFFFF !important;
}
textarea, input, select {
background-color: #000000 !important;
color: #f5f5f5 !important;
border-color: #555555 !important;
}
label {
color: #f5f5f5 !important;
}"""
# Create the interface using gr.Blocks
with gr.Blocks(css=dark_css) as demo:
    gr.Markdown(
        """
<p style='text-align: center;color:white'>
Test Collinear Veritas and compare it with Lynx 8B using the sample conversations below, or type your own.
Collinear Veritas works with any input format, including NLI, QA, and dialog.
</p>
"""
    )
    with gr.Row():
        input_style_dropdown = gr.Dropdown(label="Input Style", choices=["Dialog", "NLI", "QA format"], value="Dialog", visible=True)
    with gr.Row():
        document_input = gr.Textbox(label="Document", lines=3, visible=True, value="""August 28, 2024
SAN FRANCISCO--(BUSINESS WIRE)-- Salesforce (NYSE: CRM), the #1 AI CRM, today announced results for its second quarter fiscal 2025 ended July 31, 2024.
Second Quarter Highlights
Second Quarter Revenue of $9.33 Billion, up 8% Year-Over-Year ("Y/Y"), up 9% in Constant Currency ("CC"), inclusive of Subscription & Support Revenue of $8.76 Billion, up 9% Y/Y, up 10% Y/Y in CC
Second Quarter GAAP Operating Margin of 19.1% and non-GAAP Operating Margin of 33.7%
Current Remaining Performance Obligation of $26.5 Billion, up 10% Y/Y, up 11% Y/Y in CC
Second Quarter Operating Cash Flow of $0.89 Billion, up 10% Y/Y, and Free Cash Flow of $0.76 Billion, up 20% Y/Y
Returned $4.3 Billion in the Form of Share Repurchases and $0.4 Billion in Dividend Payments to Stockholders
FY25 Guidance Highlights
Initiates Third Quarter FY25 Revenue Guidance of $9.31 Billion to $9.36 Billion, up 7% Y/Y
Maintains Full Year FY25 Revenue Guidance of $37.7 Billion to $38.0 Billion, up 8% - 9% Y/Y and Maintains Full Year FY25 Subscription & Support Revenue Growth Guidance of Slightly Below 10% Y/Y & Approximately 10% in CC
Updates Full Year FY25 GAAP Operating Margin Guidance to 19.7% and Updates non-GAAP Operating Margin Guidance to 32.8%
Raises Full Year FY25 Operating Cash Flow Growth Guidance to 23% to 25% Y/Y""")
        conversation_input = gr.Textbox(label="Conversation", lines=5, visible=True, value="""user:Salesforce has a fantastic year with Agent Force
assistant: Yes, they seem to be doing quite well.
user:Can you tell me their projected earnings for next year?
assistant:Yes, it is about $38Bn.""")
        claim_input = gr.Textbox(label="Claim", lines=5, visible=False, value="Salesforce might earn about $38Bn next year")
        question_input = gr.Textbox(label="Question", lines=5, visible=False, value="What is Salesforce's revenue guidance for next year?")
        answer_input = gr.Textbox(label="Answer", lines=5, visible=False, value="Salesforce revenue guidance for next year is about $37.8Bn ")
    with gr.Row():
        result_output = gr.Textbox(label="Veritas Model Result")
        veritas_time_output = gr.Textbox(label="Veritas Model Time (seconds)")
        lynx_output = gr.Textbox(label="Lynx Model Result")
        lynx_time_output = gr.Textbox(label="Lynx Model Time (seconds)")
    # Set the visibility of inputs based on the selected input style
    input_style_dropdown.change(
        fn=update_inputs,
        inputs=[input_style_dropdown],
        outputs=[document_input, conversation_input, claim_input, question_input, answer_input]
    )

    # Submit runs the handlers sequentially: the Veritas check first, then Lynx, then the dataset logger
    gr.Button("Submit").click(
        fn=judge_reliability,
        inputs=[input_style_dropdown, document_input, conversation_input, claim_input, question_input, answer_input],
        outputs=[result_output, veritas_time_output]
    ).then(
        fn=lynx,
        inputs=[input_style_dropdown, document_input, question_input, answer_input],
        outputs=[lynx_output, lynx_time_output]
    ).then(
        fn=add_to_dataset,
        inputs=[input_style_dropdown, document_input, question_input, answer_input, claim_input, conversation_input, lynx_output, result_output],
        outputs=[]
    )
# Launch the demo
if __name__ == "__main__":
demo.launch()