jsonify text response string
app.py CHANGED
@@ -4,6 +4,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import spaces
 import openai
+import json
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
@@ -96,7 +97,7 @@ def model_call(question, document, answer):
         prompt=NEW_FORMAT
     )
     print("RESPONSE FROM CLIENT:", response)
-    generated_text = response.choices[0].text
+    generated_text = json.loads(response.choices[0].text)
     reasoning = generated_text["REASONING"][0]
     score = generated_text["SCORE"]
     # inputs = tokenizer(NEW_FORMAT, return_tensors="pt")
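For context, a minimal sketch of the parsing step this change introduces, using a hypothetical response string (the actual completion text is assumed to be a JSON object with "REASONING" and "SCORE" keys, as the lookups in model_call suggest):

import json

# Hypothetical completion text; the real response.choices[0].text is assumed
# to have the same shape, per the REASONING/SCORE lookups in model_call.
text = '{"REASONING": ["The document supports the answer."], "SCORE": "PASS"}'

generated_text = json.loads(text)           # parse the JSON string into a dict
reasoning = generated_text["REASONING"][0]  # first element of the REASONING list
score = generated_text["SCORE"]

print(reasoning)  # The document supports the answer.
print(score)      # PASS

json.loads is what converts the string into a dict; without it, indexing the raw response string with a key like "REASONING" raises a TypeError, which is what this commit addresses.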