Saiteja Solleti committed
Commit 0ed4471 · 1 Parent(s): 5633e27

correcting scores

Files changed (1): app.py +19 -6
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 import os
+import time
 
 from loaddataset import ExtractRagBenchData
 from createmilvusschema import CreateMilvusDbSchema
@@ -36,7 +37,11 @@ print(rag_extracted_data.head(5))
 EmbedAllDocumentsAndInsert(QUERY_EMBEDDING_MODEL, rag_extracted_data, db_collection, window_size=WINDOW_SIZE, overlap=OVERLAP)
 """
 
-def EvaluateRAGModel(query, evaluation_model):
+def EvaluateRAGModel(question, evaluation_model):
+    start_time = time.time()
+
+    query = question.strip()
+
     #invoke create milvus db function
     try:
         db_collection = CreateMilvusDbSchema()
@@ -70,18 +75,21 @@ def EvaluateRAGModel(query, evaluation_model):
     print(rmsecontextrel)
     print(rmsecontextutil)
     print(aucscore)
+    end_time = time.time()
+
+    execution_time = end_time - start_time
 
-    return answer, rmsecontextrel, rmsecontextutil, aucscore
+    return answer, rmsecontextrel, rmsecontextutil, aucscore, execution_time
 
 
 # Create Gradio UI
 with gr.Blocks() as iface:
-    gr.Markdown("## Capstone Project Group 10 - Model Evaluation")
+    gr.Markdown("## Capstone Project Group 10 ")
 
     with gr.Row():
         question_input = gr.Textbox(label="Enter your Question", lines=2)
         dropdown_input = gr.Dropdown(
-            ["LLaMA 3.3", "Mistral &B", "Model C"],
+            ["LLaMA 3.3", "Mistral 7B"],
             value="LLaMA 3.3",
             label="Select a Model"
         )
@@ -91,7 +99,7 @@ with gr.Blocks() as iface:
     with gr.Row():
         with gr.Column():
             gr.Markdown("### Response")
-            response = gr.Textbox(interactive=False, show_label=False, lines=2)
+            response = gr.Textbox(interactive=False, show_label=False, lines=4)
 
     with gr.Row():
         with gr.Column():
@@ -106,8 +114,13 @@ with gr.Blocks() as iface:
             gr.Markdown("### AUCROC ADHERENCE")
             aucscore = gr.Textbox(interactive=False, show_label=False, lines=2)
 
+        with gr.Column():
+            gr.Markdown("### PROCESSING TIME")
+            processingTime = gr.Textbox(interactive=False, show_label=False, lines=2)
+
+
     # Connect submit button to evaluation function
-    submit_button.click(EvaluateRAGModel, inputs=[question_input, dropdown_input], outputs=[response, rmsecontextrel, rmsecontextutil, aucscore])
+    submit_button.click(EvaluateRAGModel, inputs=[question_input, dropdown_input], outputs=[response, rmsecontextrel, rmsecontextutil, aucscore, processingTime])
 
 # Run the Gradio app
 iface.launch()
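
For readers skimming the change, here is a minimal standalone sketch (not part of the commit) of the pattern it introduces: the handler times its own execution with time.time() and returns the elapsed seconds as one extra tuple element, and the click wiring gains a matching output component, since Gradio maps returned values to outputs positionally. evaluate_stub and demo are hypothetical names; the component arguments mirror the diff.

import time

import gradio as gr


def evaluate_stub(question, evaluation_model):
    # Hypothetical stand-in for EvaluateRAGModel, mirroring the commit's
    # pattern: strip the question, time the work, and return the elapsed
    # time as an additional value.
    start_time = time.time()
    query = question.strip()
    answer = f"[{evaluation_model}] would answer: {query}"
    execution_time = time.time() - start_time
    return answer, f"{execution_time:.4f} s"


with gr.Blocks() as demo:
    question_input = gr.Textbox(label="Enter your Question", lines=2)
    dropdown_input = gr.Dropdown(["LLaMA 3.3", "Mistral 7B"], value="LLaMA 3.3", label="Select a Model")
    submit_button = gr.Button("Submit")
    response = gr.Textbox(interactive=False, show_label=False, lines=4)
    processing_time = gr.Textbox(interactive=False, show_label=False, lines=2)
    # Each returned value maps positionally to one output component, so
    # returning execution_time requires listing a component for it here.
    submit_button.click(evaluate_stub, inputs=[question_input, dropdown_input],
                        outputs=[response, processing_time])

demo.launch()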