Spaces:
Sleeping
Sleeping
Allen Park
committed on
Commit
·
1418604
1
Parent(s):
f663f9b
only update the client base_url rather than toggling between different client elements
Browse files
app.py
CHANGED
@@ -71,17 +71,11 @@ HEADER = """
|
|
71 |
**Getting Started**: Provide a question and document or context given to your model in addition to the answer given by the model and then click submit. The output panel will indicate whether the response is a hallucination (Fail) or if it is faithful to the given document or context (Pass) through the score Pass or Fail and provide reasoning behind the score.
|
72 |
"""
|
73 |
|
74 |
-
def
|
75 |
if model_name == "Patronus Lynx 8B":
|
76 |
-
return
|
77 |
-
base_url="https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/",
|
78 |
-
api_key=LEPTON_API_TOKEN
|
79 |
-
)
|
80 |
elif model_name == "Patronus Lynx 70B":
|
81 |
-
return
|
82 |
-
base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
|
83 |
-
api_key=LEPTON_API_TOKEN
|
84 |
-
)
|
85 |
|
86 |
def parse_patronus_lynx_response(
|
87 |
response: str,
|
@@ -109,7 +103,11 @@ def parse_patronus_lynx_response(
|
|
109 |
|
110 |
return hallucination, reasoning
|
111 |
|
112 |
-
def model_call(question, document, answer,
|
|
|
|
|
|
|
|
|
113 |
if question == "" or document == "" or answer == "":
|
114 |
return "", ""
|
115 |
NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
|
@@ -124,20 +122,20 @@ def model_call(question, document, answer, client):
|
|
124 |
combined_reasoning = " ".join(reasoning)[1:-1]
|
125 |
return combined_reasoning, score
|
126 |
|
127 |
-
inputs = [
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
]
|
132 |
-
outputs = [
|
133 |
-
|
134 |
-
|
135 |
-
]
|
136 |
|
137 |
with gr.Blocks() as demo:
|
138 |
-
|
139 |
gr.Markdown(HEADER)
|
140 |
-
gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
|
141 |
model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model", interactive=True)
|
142 |
with gr.Row():
|
143 |
with gr.Column(scale=1):
|
@@ -150,11 +148,10 @@ with gr.Blocks() as demo:
|
|
150 |
with gr.Column(scale=1):
|
151 |
reasoning = gr.Textbox(label="Reasoning")
|
152 |
score = gr.Textbox(label="Score (FAIL if Hallucinated, PASS if not)")
|
|
|
153 |
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
document.submit(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
|
159 |
-
answer.submit(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
|
160 |
demo.launch()
|
|
|
71 |
**Getting Started**: Provide a question and document or context given to your model in addition to the answer given by the model and then click submit. The output panel will indicate whether the response is a hallucination (Fail) or if it is faithful to the given document or context (Pass) through the score Pass or Fail and provide reasoning behind the score.
|
72 |
"""
|
73 |
|
74 |
+
def update_client_base_url(model_name):
|
75 |
if model_name == "Patronus Lynx 8B":
|
76 |
+
return "https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/"
|
|
|
|
|
|
|
77 |
elif model_name == "Patronus Lynx 70B":
|
78 |
+
return "https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/"
|
|
|
|
|
|
|
79 |
|
80 |
def parse_patronus_lynx_response(
|
81 |
response: str,
|
|
|
103 |
|
104 |
return hallucination, reasoning
|
105 |
|
106 |
+
def model_call(question, document, answer, client_base_url):
|
107 |
+
client = openai.OpenAI(
|
108 |
+
base_url=client_base_url,
|
109 |
+
api_key=LEPTON_API_TOKEN
|
110 |
+
)
|
111 |
if question == "" or document == "" or answer == "":
|
112 |
return "", ""
|
113 |
NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
|
|
|
122 |
combined_reasoning = " ".join(reasoning)[1:-1]
|
123 |
return combined_reasoning, score
|
124 |
|
125 |
+
# inputs = [
|
126 |
+
# gr.Textbox(label="Question"),
|
127 |
+
# gr.Textbox(label="Document"),
|
128 |
+
# gr.Textbox(label="Answer")
|
129 |
+
# ]
|
130 |
+
# outputs = [
|
131 |
+
# gr.Textbox(label="Reasoning"),
|
132 |
+
# gr.Textbox(label="Score")
|
133 |
+
# ]
|
134 |
|
135 |
with gr.Blocks() as demo:
|
136 |
+
base_url_state = gr.State(update_client_base_url("Patronus Lynx 8B"))
|
137 |
gr.Markdown(HEADER)
|
138 |
+
# gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
|
139 |
model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model", interactive=True)
|
140 |
with gr.Row():
|
141 |
with gr.Column(scale=1):
|
|
|
148 |
with gr.Column(scale=1):
|
149 |
reasoning = gr.Textbox(label="Reasoning")
|
150 |
score = gr.Textbox(label="Score (FAIL if Hallucinated, PASS if not)")
|
151 |
+
model_dropdown.change(fn=update_client_base_url, inputs=[model_dropdown], outputs=[base_url_state])
|
152 |
|
153 |
+
submit_button.click(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
|
154 |
+
question.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
|
155 |
+
document.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
|
156 |
+
answer.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
|
|
|
|
|
157 |
demo.launch()
|