mvectors
semantic_search/llm_eval.py CHANGED
@@ -72,7 +72,7 @@ def eval(question, answers):
     prompt = prompt.format(query, search_results)
     response = json.loads(llm.invoke_llm_model(prompt,False))
     #response = textgen_llm(prompt)
-    print("Response from LLM: ", response)
+    #print("Response from LLM: ", response)
     # inter_trim =response.split("[")[1]
     # final_out = json.loads('{"results":['+inter_trim.split("]")[0]+']}')
     llm_scores = []
@@ -93,8 +93,8 @@ def eval(question, answers):
         current_scores.append(i['score'])


-    print("LLM Scores: ", llm_scores)
-    print("Current Scores: ", current_scores)
+    #print("LLM Scores: ", llm_scores)
+    #print("Current Scores: ", current_scores)
     x = np.array(llm_scores)
     x = x.reshape(-1, 1)
     x_norm = (pre.MinMaxScaler().fit_transform(x)).flatten().tolist()
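
For reference, the unchanged tail of the second hunk min-max normalizes the LLM judge's scores into the [0, 1] range. Below is a minimal, self-contained sketch of just that step, assuming the aliases np = numpy and pre = sklearn.preprocessing (the imports sit outside this diff) and using hypothetical example scores:

    import numpy as np
    from sklearn import preprocessing as pre  # assumed to match the 'pre' alias in llm_eval.py

    # Hypothetical relevance scores returned by the LLM judge for one query.
    llm_scores = [3.0, 7.5, 9.0, 1.0]

    x = np.array(llm_scores).reshape(-1, 1)  # MinMaxScaler expects a 2-D column of samples
    x_norm = pre.MinMaxScaler().fit_transform(x).flatten().tolist()

    print(x_norm)  # [0.25, 0.8125, 1.0, 0.0] -- each score rescaled as (score - min) / (max - min)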