Saiteja Solleti committed on
Commit 1432cc9 · 1 Parent(s): 61bb151

UI level changes

Files changed (3):
  1. app.py +61 -32
  2. calculatescores.py +1 -1
  3. model.py +0 -12
app.py CHANGED
@@ -11,7 +11,6 @@ from generationhelper import GenerateAnswer
 from formatresultshelper import FormatAndScores
 from calculatescores import CalculateScores

-from model import generate_response
 from huggingface_hub import login
 from huggingface_hub import whoami
 from huggingface_hub import dataset_info
@@ -33,50 +32,80 @@ login(hf_token)
 rag_extracted_data = ExtractRagBenchData()
 print(rag_extracted_data.head(5))

-# invoke create milvus db function
-try:
-    db_collection = CreateMilvusDbSchema()
-except Exception as e:
-    print(f"Error creating Milvus DB schema: {e}")
-
-# insert embeddings into milvus db
 """
 EmbedAllDocumentsAndInsert(QUERY_EMBEDDING_MODEL, rag_extracted_data, db_collection, window_size=WINDOW_SIZE, overlap=OVERLAP)
 """
-query = "what would the net revenue have been in 2015 if there wasn't a stipulated settlement from the business combination in october 2015?"
-
-results_for_top10_chunks = SearchTopKDocuments(db_collection, query, QUERY_EMBEDDING_MODEL, top_k=RETRIVE_TOP_K_SIZE)
-
-reranked_results = FineTuneAndRerankSearchResults(results_for_top10_chunks, rag_extracted_data, query, RERANKING_MODEL)
-
-answer = GenerateAnswer(query, reranked_results.head(3), PROMPT_MODEL)
-
-completion_result, relevant_sentence_keys, all_utilized_sentence_keys, support_keys, support_level = FormatAndScores(query, reranked_results.head(1), answer, EVAL_MODEL)
-
-print(relevant_sentence_keys)
-print(all_utilized_sentence_keys)
-print(support_keys)
-print(support_level)
-print(completion_result)
-
-document_id = reranked_results.head(1)['doc_id'].values[0]
-extarcted_row_for_given_id = rag_extracted_data[rag_extracted_data["id"] == document_id]
-
-score1, score2, score3 = CalculateScores(relevant_sentence_keys, all_utilized_sentence_keys, support_keys, support_level, extarcted_row_for_given_id)
-
-print(score1)
-print(score2)
-print(score3)
-
-def chatbot(prompt):
-    return whoami()
-
-iface = gr.Interface(fn=chatbot,
-                     inputs="text",
-                     outputs="text",
-                     title="Capstone Project Group 10")
-
-if __name__ == "__main__":
-    iface.launch()
+def EvaluateRAGModel(query, evaluation_model):
+    # invoke create milvus db function
+    try:
+        db_collection = CreateMilvusDbSchema()
+    except Exception as e:
+        print(f"Error creating Milvus DB schema: {e}")
+
+    # insert embeddings into milvus db
+
+    #query = "what would the net revenue have been in 2015 if there wasn't a stipulated settlement from the business combination in october 2015?"
+
+    results_for_top10_chunks = SearchTopKDocuments(db_collection, query, QUERY_EMBEDDING_MODEL, top_k=RETRIVE_TOP_K_SIZE)
+
+    reranked_results = FineTuneAndRerankSearchResults(results_for_top10_chunks, rag_extracted_data, query, RERANKING_MODEL)
+
+    answer = GenerateAnswer(query, reranked_results.head(3), PROMPT_MODEL)
+
+    completion_result, relevant_sentence_keys, all_utilized_sentence_keys, support_keys, support_level = FormatAndScores(query, reranked_results.head(1), answer, EVAL_MODEL)
+
+    print(relevant_sentence_keys)
+    print(all_utilized_sentence_keys)
+    print(support_keys)
+    print(support_level)
+    print(completion_result)
+
+    document_id = reranked_results.head(1)['doc_id'].values[0]
+    extarcted_row_for_given_id = rag_extracted_data[rag_extracted_data["id"] == document_id]
+
+    rmsecontextrel, rmsecontextutil, aucscore = CalculateScores(relevant_sentence_keys, all_utilized_sentence_keys, support_keys, support_level, extarcted_row_for_given_id)
+
+    print(rmsecontextrel)
+    print(rmsecontextutil)
+    print(aucscore)
+
+# Create Gradio UI
+with gr.Blocks() as iface:
+    gr.Markdown("## Capstone Project Group 10 - Model Evaluation")
+
+    with gr.Row():
+        question_input = gr.Textbox(label="Enter your Question", lines=2)
+        dropdown_input = gr.Dropdown(
+            ["LLaMA 3.3", "Mistral 7B", "Model C"],
+            value="LLaMA 3.3",
+            label="Select a Model"
+        )
+
+    submit_button = gr.Button("Evaluate Model")
+
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("### Output 1")
+            response = gr.Textbox(interactive=False, show_label=False, lines=2)
+
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("### Output 2")
+            output2 = gr.Textbox(interactive=False, show_label=False, lines=2)
+
+        with gr.Column():
+            gr.Markdown("### Output 3")
+            output3 = gr.Textbox(interactive=False, show_label=False, lines=2)
+
+        with gr.Column():
+            gr.Markdown("### Output 4")
+            output4 = gr.Textbox(interactive=False, show_label=False, lines=2)
+
+    # Connect submit button to evaluation function
+    submit_button.click(EvaluateRAGModel, inputs=[question_input, dropdown_input], outputs=[response, output2, output3, output4])
+
+# Run the Gradio app
+iface.launch()
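Note: as committed, EvaluateRAGModel prints completion_result and the three scores but returns nothing, while submit_button.click(...) wires it to four output textboxes, so those boxes will stay empty. Below is a minimal, hypothetical sketch of the missing return path; the placeholder values stand in for the pipeline results and are not part of the commit.

# Hypothetical sketch: a Gradio click handler must return one value per
# output component; a 4-tuple maps positionally onto
# outputs=[response, output2, output3, output4].
import gradio as gr

def evaluate_rag_model(query, evaluation_model):
    # ...retrieval, reranking, generation, and scoring would run here...
    completion_result = f"answer for {query!r} via {evaluation_model}"  # placeholder
    rmsecontextrel, rmsecontextutil, aucscore = 0.0, 0.0, 0.0  # placeholders
    return completion_result, rmsecontextrel, rmsecontextutil, aucscore

with gr.Blocks() as demo:
    question_input = gr.Textbox(label="Enter your Question", lines=2)
    dropdown_input = gr.Dropdown(["LLaMA 3.3", "Mistral 7B", "Model C"],
                                 value="LLaMA 3.3", label="Select a Model")
    submit_button = gr.Button("Evaluate Model")
    response = gr.Textbox(interactive=False, lines=2)
    output2 = gr.Textbox(interactive=False, lines=2)
    output3 = gr.Textbox(interactive=False, lines=2)
    output4 = gr.Textbox(interactive=False, lines=2)
    submit_button.click(evaluate_rag_model,
                        inputs=[question_input, dropdown_input],
                        outputs=[response, output2, output3, output4])

if __name__ == "__main__":
    demo.launch()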
calculatescores.py CHANGED
@@ -1,7 +1,7 @@
 import formatresultshelper
 import numpy as np

-from sklearn.metrics import mean_squared_error, roc_auc_score
+from sklearn.metrics import roc_auc_score

 #Defined as utilized documents / retrieved documents for the query
 def compute_context_relevance(relevant_sentences, support_keys):
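Only roc_auc_score survives the import change, presumably feeding the aucscore value that app.py now prints. Below is a toy sketch of how that call is conventionally used, assuming binary per-sentence support labels and predicted support probabilities; true_support_labels and predicted_support_scores are hypothetical names, not from this repo.

# Hypothetical sketch of an AUC computation with the retained import.
from sklearn.metrics import roc_auc_score

def compute_auc(true_support_labels, predicted_support_scores):
    # true_support_labels: 1 if a retrieved sentence truly supports the answer, else 0
    # predicted_support_scores: model-assigned support probabilities in [0, 1]
    return roc_auc_score(true_support_labels, predicted_support_scores)

print(compute_auc([1, 0, 1, 1], [0.9, 0.2, 0.7, 0.6]))  # every positive outranks the negative, so AUC = 1.0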
model.py DELETED
@@ -1,12 +0,0 @@
-from transformers import pipeline
-
-def load_model():
-    """Loads the model from Hugging Face."""
-    model = pipeline("text-generation", model="gpt2")  # Replace with your model
-    return model
-
-def generate_response(prompt):
-    """Generates a response using the model."""
-    model = load_model()
-    response = model(prompt, max_length=100, do_sample=True)
-    return response[0]["generated_text"]
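The deleted helper rebuilt the pipeline inside every generate_response call, reloading gpt2 on each prompt; answer generation now goes through GenerateAnswer from generationhelper instead. If a standalone fallback like this were ever reinstated, caching the pipeline at module load would avoid the per-call reload. A minimal sketch mirroring the deleted placeholder:

# Hypothetical sketch: build the text-generation pipeline once, not per call.
from transformers import pipeline

_generator = pipeline("text-generation", model="gpt2")  # loaded once at import

def generate_response(prompt):
    """Generates a response using the cached model."""
    response = _generator(prompt, max_length=100, do_sample=True)
    return response[0]["generated_text"]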