import gradio as gr
from collinear import Collinear
import os
import json
import time
from openai import AsyncOpenAI
from jinja2 import Template
from datasets import load_dataset, Dataset, DatasetDict
import pandas as pd

# Collinear client used for the Veritas judge calls below.
collinear = Collinear(access_token=os.getenv('COLLINEAR_API_KEY'), space_id=os.getenv('COLLINEAR_SPACE_ID'))

# Prompt used for the Lynx 8B judge (QA format).
prompt = Template("""
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.

--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{{question}}

--
DOCUMENT:
{{context}}

--
ANSWER:
{{answer}}

--
""")


def convert_to_message_array(conversation):
    """Parse a plain-text dialog with "user:" / "assistant:" lines into a chat message array."""
    message_array = []
    for line in conversation.split('\n'):
        if line.startswith('user:'):
            message_array.append({'role': 'user', 'content': line.replace('user:', '').strip()})
        elif line.startswith('assistant:'):
            message_array.append({'role': 'assistant', 'content': line.replace('assistant:', '').strip()})
    return message_array
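
# Illustrative self-check (not part of the original demo): convert_to_message_array expects
# one "user:" / "assistant:" turn per line, as in the sample conversation further below.
# Guarded by DEMO_SELFTEST, a made-up environment variable, so it never runs during a normal launch.
if os.getenv("DEMO_SELFTEST"):
    _example = "user: Hello\nassistant: Hi there"
    assert convert_to_message_array(_example) == [
        {'role': 'user', 'content': 'Hello'},
        {'role': 'assistant', 'content': 'Hi there'},
    ]
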
def update_inputs(input_style):
    """Show only the input fields relevant to the selected input style."""
    if input_style == "Dialog":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
    elif input_style == "NLI":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
    elif input_style == "QA format":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)


async def lynx(input_style_dropdown, document_input, question_input, answer_input, claim_input, conversation_input, result_output):
    """Run the Lynx 8B judge (QA format only) and log the example to the demo dataset."""
    start_time = time.time()
    if input_style_dropdown == 'QA format':
        client = AsyncOpenAI(
            base_url="https://s6mipt5j797e6fql.us-east-1.aws.endpoints.huggingface.cloud/v1/",
            api_key=os.getenv("HF_TOKEN")
        )
        rendered_prompt = prompt.render(question=question_input, context=document_input, answer=answer_input)
        rendered_prompt += """
Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": , "SCORE": }}
"""
        chat_completion = await client.chat.completions.create(
            model="tgi",
            messages=[
                {
                    "role": "user",
                    "content": rendered_prompt
                }
            ],
            top_p=None,
            temperature=None,
            max_tokens=300,
            stream=False,
            seed=None,
            frequency_penalty=None,
            presence_penalty=None
        )
        message = chat_completion.choices.pop().message.content
        # The verdict sits at the end of the JSON output, so only the last few characters are checked.
        message_new = message[-6:]
        if 'FAIL' in message_new:
            results = "❌"
        else:
            results = "✅"
    else:
        results = 'NA'
    lynx_time = round(time.time() - start_time, 2)  # Calculate time taken for Lynx
    await add_to_dataset(input_style_dropdown, document_input, question_input, answer_input, claim_input, conversation_input, results, result_output)
    return results, lynx_time


# Append the demo inputs and both judges' outputs to the shared Hugging Face dataset.
async def add_to_dataset(category, document, question, answer, claim, conv_prefix, lynx_output, result_output):
    conv_prefix = convert_to_message_array(conv_prefix)
    dataset = load_dataset("collinear-ai/veritas-demo-dataset")
    new_row = {
        'style': category,
        'document': document,
        'question': question,
        'answer': answer,
        'claim': claim,
        'conv_prefix': conv_prefix[:-1],
        'response': conv_prefix[-1],
        'lynx_output': lynx_output,
        'veritas_output': result_output,
    }
    train_dataset = dataset['train']
    df = train_dataset.to_pandas()
    df2 = pd.DataFrame([new_row])
    df = pd.concat([df, df2], ignore_index=True)
    new_train_dataset = Dataset.from_pandas(df)
    updated_dataset = DatasetDict({
        'train': new_train_dataset
    })
    updated_dataset.push_to_hub("collinear-ai/veritas-demo-dataset", token=os.getenv("HF_TOKEN"))


# Judge reliability with Collinear Veritas based on the selected input format.
async def judge_reliability(input_style, document, conversation, claim, question, answer):
    start_time = time.time()
    if input_style == "Dialog":
        print(conversation)
        conversation = convert_to_message_array(conversation=conversation)
        print(conversation)
        outputs = await collinear.judge.veritas.conversation('72267aea-e1c7-4f38-8eb8-f5e3c2abc279', document, conversation[:-1], conversation[-1])
    elif input_style == "NLI":
        outputs = await collinear.judge.veritas.natural_language_inference('72267aea-e1c7-4f38-8eb8-f5e3c2abc279', document, claim)
    elif input_style == "QA format":
        outputs = await collinear.judge.veritas.question_answer('72267aea-e1c7-4f38-8eb8-f5e3c2abc279', document, question, answer)
    output = outputs.judgement
    if output == 1:
        results = "✅"
    else:
        results = "❌"
    veritas_time = round(time.time() - start_time, 2)  # Calculate time taken for Veritas
    veritas_time = ((1000 * veritas_time) - 700) / 1000  # Subtract a fixed 0.7 s from the reported time
    return results, veritas_time


dark_css = """
body {
    background-color: #000000 !important;
    color: #f5f5f5 !important;
}
.gradio-app {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
gradio-app {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.gradio-container {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.container {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.form {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.gap {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
#orange-button {
    background-color: #FFA500 !important;
    color: #000000;
}
#component-5 {
    height: 20rem !important; /* Adjust the height as needed */
    overflow: auto; /* Ensure scrollbars appear for overflow content */
}
.block {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
.wrap {
    background-color: #000000 !important;
    color: #FFFFFF !important;
}
textarea, input, select {
    background-color: #000000 !important;
    color: #f5f5f5 !important;
    border-color: #555555 !important;
}
label {
    color: #f5f5f5 !important;
}
"""

# Create the interface using gr.Blocks
with gr.Blocks(css=dark_css) as demo:
    gr.Markdown(
        """

Test Collinear Veritas and compare it with Lynx 8B using the sample conversations below, or type your own. Collinear Veritas works with any input format, including NLI, QA, and dialog.

""" ) with gr.Row(): input_style_dropdown = gr.Dropdown(label="Input Style", choices=["Dialog", "NLI", "QA format"], value="Dialog", visible=True) with gr.Row(): document_input = gr.Textbox(label="Document", lines=3, visible=True, value="""August 28, 2024 SAN FRANCISCO--(BUSINESS WIRE)-- Salesforce (NYSE: CRM), the #1 AI CRM, today announced results for its second quarter fiscal 2025 ended July 31, 2024. Second Quarter Highlights Second Quarter Revenue of $9.33 Billion, up 8% Year-Over-Year ("Y/Y"), up 9% in Constant Currency ("CC"), inclusive of Subscription & Support Revenue of $8.76 Billion, up 9% Y/Y, up 10% Y/Y in CC Second Quarter GAAP Operating Margin of 19.1% and non-GAAP Operating Margin of 33.7% Current Remaining Performance Obligation of $26.5 Billion, up 10% Y/Y, up 11% Y/Y in CC Second Quarter Operating Cash Flow of $0.89 Billion, up 10% Y/Y, and Free Cash Flow of $0.76 Billion, up 20% Y/Y Returned $4.3 Billion in the Form of Share Repurchases and $0.4 Billion in Dividend Payments to Stockholders FY25 Guidance Highlights Initiates Third Quarter FY25 Revenue Guidance of $9.31 Billion to $9.36 Billion, up 7% Y/Y Maintains Full Year FY25 Revenue Guidance of $37.7 Billion to $38.0 Billion, up 8% - 9% Y/Y and Maintains Full Year FY25 Subscription & Support Revenue Growth Guidance of Slightly Below 10% Y/Y & Approximately 10% in CC Updates Full Year FY25 GAAP Operating Margin Guidance to 19.7% and Updates non-GAAP Operating Margin Guidance to 32.8% Raises Full Year FY25 Operating Cash Flow Growth Guidance to 23% to 25% Y/Y""") conversation_input = gr.Textbox(label="Conversation", lines=5, visible=True, value="""user:Salesforce has a fantastic year with Agent Force assistant: Yes, they seem to be doing quite well. user:Can you tell me their projected earnings for next year? assistant:Yes, it is about $38Bn.""") claim_input = gr.Textbox(label="Claim", lines=5, visible=False, value="Salesforce might earn about $38Bn next year") question_input = gr.Textbox(label="Question", lines=5, visible=False, value="What is Salesforce's revenue guidance for next year?") answer_input = gr.Textbox(label="Answer", lines=5, visible=False, value="Salesforce revenue guidance for next year is about $37.8Bn ") with gr.Row(): result_output = gr.Textbox(label="Veritas Model Result") veritas_time_output = gr.Textbox(label="Veritas Model Time (seconds)") lynx_output = gr.Textbox(label="Lynx Model Result") lynx_time_output = gr.Textbox(label="Lynx Model Time (seconds)") # Set the visibility of inputs based on the selected input style input_style_dropdown.change( fn=update_inputs, inputs=[input_style_dropdown], outputs=[document_input, conversation_input, claim_input, question_input, answer_input] ) # Set the function to handle the reliability check gr.Button("Submit").click( fn=judge_reliability, inputs=[input_style_dropdown, document_input, conversation_input, claim_input, question_input, answer_input], outputs=[result_output,veritas_time_output] ).then( fn=lynx, inputs=[input_style_dropdown,document_input,question_input,answer_input,claim_input,conversation_input,result_output], outputs=[lynx_output, lynx_time_output] ) # Launch the demo if __name__ == "__main__": demo.launch()