import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import spaces
import openai
import json
import re
HF_TOKEN = os.environ.get("HF_TOKEN", None)
LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
# if torch.cuda.is_available():
#     device = "cuda:0"
# else:
#     device = "cpu"
# Set up the OpenAI-compatible client that calls the hosted Lynx inference endpoint
client = openai.OpenAI(
    base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
    api_key=LEPTON_API_TOKEN,
)
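# Optional sanity check (a minimal sketch, not part of the original app): the
# client above only fails at request time if the key is missing, so warn up front.
if LEPTON_API_TOKEN is None:
    print("Warning: LEPTON_API_TOKEN is not set; requests to the Lynx endpoint will fail.")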
# Create own model
# tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
# model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", torch_dtype=torch.float16, device_map="auto")
# model.gradient_checkpointing_enable()
# def load_model_and_tokenizer(model_choice):
#     if model_choice == "Patronus Lynx 8B":
#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
#     else:
#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-70B-Instruct"
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto").to(device)
#     model.gradient_checkpointing_enable()
#     return tokenizer, model
PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}
--
DOCUMENT:
{document}
--
ANSWER:
{answer}
--
Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""
HEADER = """
# Patronus Lynx Demo
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:50px;">
<td style="text-align: center;">
<a href="https://www.patronus.ai">
<img src="https://cdn.prod.website-files.com/64e655d42d3be60f582d0472/64ede352897bcddbe2d41207_patronusai_final_logo.svg" width="200" height="40" />
</a>
</td>
</tr>
</table>
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:30px;">
<td style="text-align: center;">
<a href="https://huggingface.co/PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Model_Card-Huggingface-orange" height="20"></a>
</td>
<td style="text-align: center;">
<a href="https://github.com/patronus-ai/Lynx-hallucination-detection"><img src="https://postimage.me/images/2024/03/04/GitHub_Logo_White.png" width="100" height="20"></a>
</td>
<td style="text-align: center; color: white;">
<a href="https://arxiv.org/abs/2407.08488"><img src="https://img.shields.io/badge/arXiv-2407.08488-b31b1b.svg" height="20"></a>
</td>
</tr>
</table>
**Patronus Lynx** is a state-of-the-art open-source model for hallucination detection.
**Getting Started**: Provide the question and the document (or context) that was given to your model, along with the answer the model produced, then click Submit. The output panel returns a score of Pass if the response is faithful to the given document or context, or Fail if it is a hallucination, together with the reasoning behind that score.
"""
def clean_json_string(json_str):
    # Replace single quotes with double quotes, but not apostrophes within words
    json_str = re.sub(r"(?<!\\)'([^']*)'", r'"\1"', json_str)
    # Add quotes around PASS or FAIL if they're not already quoted
    json_str = re.sub(r'"SCORE":\s*(PASS|FAIL)', r'"SCORE": "\1"', json_str)
    return json_str
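# Illustration (hypothetical input) of what clean_json_string fixes: the model
# sometimes emits single-quoted JSON with a bare PASS/FAIL token, which
# json.loads rejects.
#
#   clean_json_string("{'REASONING': ['No supporting evidence'], 'SCORE': FAIL}")
#   => '{"REASONING": ["No supporting evidence"], "SCORE": "FAIL"}'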
# @spaces.GPU()
# def model_call(question, document, answer, tokenizer, model):
def model_call(question, document, answer):
    # device = next(model.parameters()).device
    NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
    print("ENTIRE NEW_FORMAT", NEW_FORMAT)
    # The base_url configured above routes this request to the hosted Lynx
    # endpoint; the model name is simply the identifier the OpenAI client sends.
    response = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=NEW_FORMAT,
    )
    print("RESPONSE FROM CLIENT:", response)
    # Normalize the raw completion into valid JSON before parsing
    generated_text = clean_json_string(response.choices[0].text)
    generated_text = json.loads(generated_text)
    print("GENERATED TEXT", generated_text)
    print("type of GENERATED TEXT", type(generated_text))
    # REASONING is a list of bullet points; surface the first one
    reasoning = generated_text["REASONING"][0]
    score = generated_text["SCORE"]
    # inputs = tokenizer(NEW_FORMAT, return_tensors="pt")
    # print("INPUTS", inputs)
    # input_ids = inputs.input_ids
    # attention_mask = inputs.attention_mask
    # generate_kwargs = dict(
    #     input_ids=input_ids,
    #     do_sample=True,
    #     attention_mask=attention_mask,
    #     pad_token_id=tokenizer.eos_token_id,
    # )
    # print("GENERATE_KWARGS", generate_kwargs)
    # with torch.no_grad():
    #     outputs = model.generate(**generate_kwargs)
    # print("OUTPUTS", outputs)
    # generated_text = tokenizer.decode(outputs[0])
    # print(generated_text)
    return reasoning, score
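# A hedged sketch (this helper is not in the original app): model output is
# free-form text, so json.loads inside model_call can raise on malformed JSON.
# A hypothetical wrapper like this one surfaces the failure in the UI instead
# of crashing the request; pass fn=safe_model_call to gr.Interface to use it.
def safe_model_call(question, document, answer):
    try:
        return model_call(question, document, answer)
    except (json.JSONDecodeError, KeyError, IndexError) as e:
        return f"Could not parse model output: {e}", "ERROR"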
# def update_model(model_choice, tokenizer_state, model_state):
#     new_tokenizer, new_model = load_model_and_tokenizer(model_choice)
#     print("UPDATED MODEL", new_tokenizer, new_model)
#     return new_tokenizer, new_model
inputs = [
    gr.Textbox(label="Question"),
    gr.Textbox(label="Document"),
    gr.Textbox(label="Answer"),
]
outputs = [
    gr.Textbox(label="Reasoning"),
    gr.Textbox(label="Score"),
]
# submit_button = gr.Button("Submit")
with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
    # tokenizer_state = gr.State()
    # model_state = gr.State()
    # model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
    # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
    # submit_button.click(fn=model_call, inputs=inputs, outputs=output)
    # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
    # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])

demo.launch()