Commit a0ae865
Committed by diksha
Parent(s): d44dd67

Complete_Finetuning

Files changed:
- .gitattributes +1 -0
- Dockerfile +37 -0
- Input_PDF.pdf +0 -0
- evaluation_handler.py +83 -0
- handler.py +186 -0
- llm_evaluation.py +119 -0
- report.pdf +3 -0
- requirements.txt +19 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,37 @@
+FROM python:3.11
+
+# Set the working directory
+WORKDIR /app
+
+# Update package lists, install dependencies, and clean up to reduce image size
+RUN apt update && apt install -y \
+        libgl1-mesa-glx \
+        curl && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy the requirements file first to leverage Docker caching
+COPY requirements.txt ./
+
+# Install Python dependencies with upgraded pip
+RUN python3 -m pip install --upgrade pip && \
+    python3 -m pip install --no-cache-dir -r requirements.txt && \
+    python3 -m pip list
+
+# Environment variables
+ENV PYTHONPATH=/app
+ENV PYTHONUNBUFFERED=1
+
+# Copy the rest of the application code
+COPY . .
+
+# Set the default command to run the application
+CMD ["python3", "handler.py"]
Input_PDF.pdf
ADDED
File without changes
evaluation_handler.py
ADDED
@@ -0,0 +1,83 @@
+import logging
+import json
+import subprocess
+import time
+from typing import Dict, Any
+import runpod
+
+def handler(job: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Handler function for running `llm_evaluation.py` in a RunPod serverless environment.
+
+    Args:
+        job (Dict[str, Any]): The job input containing parameters for evaluation.
+
+    Returns:
+        Dict[str, Any]: Evaluation results or error details.
+    """
+    start_time = time.time()
+    logging.info("Evaluation handler started.")
+
+    # Extract job input
+    job_input = job.get('input', {})
+    model_name = job_input.get('model_name')
+
+    if not model_name:
+        logging.error("Missing required field: 'model_name'")
+        return {
+            "status": "error",
+            "error": "Missing required field: 'model_name'"
+        }
+
+    try:
+        # Prepare evaluation input
+        evaluation_input = json.dumps({"model_name": model_name})
+        logging.info(f"Starting evaluation for model: {model_name}")
+
+        # Run `llm_evaluation.py` as a subprocess
+        result = subprocess.run(
+            ['python3', 'llm_evaluation.py', evaluation_input],
+            capture_output=True,
+            text=True,
+            check=True
+        )
+        logging.info("Model evaluation completed successfully.")
+
+        # Attempt to parse the output as JSON
+        try:
+            evaluation_results = json.loads(result.stdout)
+        except json.JSONDecodeError:
+            evaluation_results = {"raw_output": result.stdout}
+
+        return {
+            "status": "success",
+            "model_name": model_name,
+            "processing_time": time.time() - start_time,
+            "evaluation_results": evaluation_results
+        }
+
+    except subprocess.CalledProcessError as e:
+        logging.error(f"Evaluation process failed: {e.stderr}")
+        return {
+            "status": "error",
+            "error": f"Evaluation process failed: {e.stderr}",
+            "stdout": e.stdout,
+            "stderr": e.stderr
+        }
+    except Exception as e:
+        logging.error(f"Unhandled error during evaluation: {str(e)}")
+        return {
+            "status": "error",
+            "error": str(e)
+        }
+
+if __name__ == "__main__":
+    # Configure logging
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(levelname)s - %(message)s"
+    )
+
+    # Start RunPod serverless endpoint
+    runpod.serverless.start({"handler": handler})
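For reference, this handler only needs a `model_name` field inside the job's `input`. A minimal local smoke test could look like the sketch below; the model name is a placeholder, not a value from this commit, and a real run still needs `llm_evaluation.py`'s own inputs in place.

import json
from evaluation_handler import handler

# Hypothetical job payload; RunPod delivers the same structure to the handler.
sample_job = {"input": {"model_name": "my-finetuned-model"}}
print(json.dumps(handler(sample_job), indent=2))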
handler.py
ADDED
@@ -0,0 +1,186 @@
+import logging
+import runpod
+import os
+import shutil
+import uuid
+import json
+import time
+import subprocess
+from typing import Dict, Any
+from azure.storage.blob import BlobServiceClient
+
+def get_azure_connection_string():
+    """Get Azure connection string from environment variable"""
+    conn_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING")
+    if not conn_string:
+        raise ValueError("Azure Storage connection string not found in environment variables")
+    return conn_string
+
+# def upload_file(file_path: str, blob_name: str, container_name: str) -> str:
+#     """Upload a file to Azure Blob Storage"""
+#     if not os.path.isfile(file_path):
+#         raise FileNotFoundError(f"The specified file does not exist: {file_path}")
+
+#     connection_string = get_azure_connection_string()
+#     blob_service_client = BlobServiceClient.from_connection_string(connection_string)
+#     container_client = blob_service_client.get_container_client(container_name)
+
+#     with open(file_path, 'rb') as file:
+#         blob_client = container_client.get_blob_client(blob_name)
+#         blob_client.upload_blob(file)
+#         return blob_client.blob_name
+
+def download_blob(blob_name: str, download_file_path: str, container_name: str) -> None:
+    """Download a file from Azure Blob Storage"""
+    connection_string = get_azure_connection_string()
+    blob_service_client = BlobServiceClient.from_connection_string(connection_string)
+    container_client = blob_service_client.get_container_client(container_name)
+    blob_client = container_client.get_blob_client(blob_name)
+
+    os.makedirs(os.path.dirname(download_file_path), exist_ok=True)
+
+    with open(download_file_path, "wb") as download_file:
+        download_stream = blob_client.download_blob()
+        download_file.write(download_stream.readall())
+    logging.info(f"Blob '{blob_name}' downloaded to '{download_file_path}'")
+
+def clean_directory(directory: str) -> None:
+    """Clean up a directory by removing all files and subdirectories"""
+    if os.path.exists(directory):
+        for filename in os.listdir(directory):
+            file_path = os.path.join(directory, filename)
+            try:
+                if os.path.isfile(file_path) or os.path.islink(file_path):
+                    os.remove(file_path)
+                elif os.path.isdir(file_path):
+                    shutil.rmtree(file_path)
+            except Exception as e:
+                logging.error(f'Failed to delete {file_path}. Reason: {e}')
+
+def handler(job: Dict[str, Any]) -> Dict[str, Any]:
+    start_time = time.time()
+    logging.info("Handler function started")
+
+    # Extract job input and validate
+    job_input = job.get('input', {})
+    required_fields = ['pdf_file', 'system_prompt', 'model_name', 'max_step', 'learning_rate', 'epochs', 'container']
+    missing_fields = [field for field in required_fields if field not in job_input]
+
+    if missing_fields:
+        return {
+            "status": "error",
+            "error": f"Missing required fields: {', '.join(missing_fields)}"
+        }
+
+    work_dir = os.path.abspath(f"/tmp/work_{str(uuid.uuid4())}")
+
+    try:
+        # Create working directory
+        os.makedirs(work_dir, exist_ok=True)
+        logging.info(f"Working directory created: {work_dir}")
+
+        # Download and process PDF
+        download_path = os.path.join(work_dir, "Input_PDF.pdf")
+        download_blob(job_input['pdf_file'], download_path, job_input['container'])
+
+        # Verify downloaded file exists
+        if not os.path.exists(download_path):
+            raise FileNotFoundError(f"Downloaded PDF file not found at: {download_path}")
+
+        # Save pipeline input as a JSON file
+        pipeline_input_path = os.path.join(work_dir, "pipeline_input.json")
+        pipeline_input = {
+            "pdf_file": download_path,
+            "system_prompt": job_input['system_prompt'],
+            "model_name": job_input['model_name'],
+            "max_step": job_input['max_step'],
+            "learning_rate": job_input['learning_rate'],
+            "epochs": job_input['epochs']
+        }
+
+        with open(pipeline_input_path, 'w') as f:
+            json.dump(pipeline_input, f)
+
+        # Run fine-tuning and evaluation
+        return run_pipeline_and_evaluate(pipeline_input_path, job_input['model_name'], start_time)
+
+    except Exception as e:
+        error_message = f"Job failed after {time.time() - start_time:.2f} seconds: {str(e)}"
+        logging.error(error_message)
+        return {
+            "status": "error",
+            "error": error_message
+        }
+
+    finally:
+        # Clean up working directory
+        try:
+            clean_directory(work_dir)
+            os.rmdir(work_dir)
+        except Exception as e:
+            logging.error(f"Failed to clean up working directory: {str(e)}")
+
+def run_pipeline_and_evaluate(pipeline_input_path: str, model_name: str, start_time: float) -> Dict[str, Any]:
+    try:
+        # Read the pipeline input file
+        with open(pipeline_input_path, 'r') as f:
+            pipeline_input = json.load(f)
+
+        # Convert the input to a JSON string for passing as an argument
+        pipeline_input_str = json.dumps(pipeline_input)
+
+        # Run fine-tuning pipeline with JSON string as argument
+        logging.info(f"Running pipeline with input: {pipeline_input_str[:100]}...")
+        finetuning_result = subprocess.run(
+            ['python3', 'Finetuning_Pipeline.py', pipeline_input_str],
+            capture_output=True,
+            text=True,
+            check=True
+        )
+        logging.info("Fine-tuning completed successfully")
+
+        # Run evaluation
+        evaluation_input = json.dumps({"model_name": model_name})
+        result = subprocess.run(
+            ['python3', 'llm_evaluation.py', evaluation_input],
+            capture_output=True,
+            text=True,
+            check=True
+        )
+
+        try:
+            # Try to parse the evaluation output as JSON
+            evaluation_results = json.loads(result.stdout)
+        except json.JSONDecodeError:
+            # If parsing fails, use the raw output
+            evaluation_results = {"raw_output": result.stdout}
+
+        return {
+            "status": "success",
+            "model_name": f"PharynxAI/{model_name}",
+            "processing_time": time.time() - start_time,
+            "evaluation_results": evaluation_results
+        }
+
+    except subprocess.CalledProcessError as e:
+        error_message = f"Pipeline process failed: {e.stderr}"
+        logging.error(error_message)
+        return {
+            "status": "error",
+            "error": error_message,
+            "stdout": e.stdout,
+            "stderr": e.stderr
+        }
+    except Exception as e:
+        error_message = f"Pipeline execution failed: {str(e)}"
+        logging.error(error_message)
+        return {
+            "status": "error",
+            "error": error_message
+        }
+
+if __name__ == "__main__":
+    logging.basicConfig(
+        level=logging.INFO,
+        format='%(asctime)s - %(levelname)s - %(message)s'
+    )
+    runpod.serverless.start({"handler": handler})
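Note that `run_pipeline_and_evaluate` shells out to `Finetuning_Pipeline.py`, which is not among the files added in this commit, so the handler assumes that script already exists in the image. For illustration, a job payload for this handler must supply every entry in `required_fields`; all values in the sketch below are placeholders, not values from this commit.

# Hypothetical job payload for handler.py (every value is illustrative only).
sample_job = {
    "input": {
        "pdf_file": "Input_PDF.pdf",               # blob name inside the Azure container
        "system_prompt": "You are a helpful assistant.",
        "model_name": "my-finetuned-model",
        "max_step": 60,
        "learning_rate": 2e-4,
        "epochs": 3,
        "container": "my-container"                # Azure Blob Storage container name
    }
}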
llm_evaluation.py
ADDED
@@ -0,0 +1,119 @@
+import json
+from sentence_transformers import SentenceTransformer, util
+import nltk
+from openai import OpenAI
+import os
+from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+import time
+import asyncio
+import logging
+import sys
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
+# Download necessary NLTK resources
+nltk.download('punkt')
+
+def load_input_data():
+    """Load input data from command line arguments."""
+    try:
+        input_data = json.loads(sys.argv[1])
+        return input_data
+    except json.JSONDecodeError as e:
+        logging.error(f"Failed to decode JSON input: {e}")
+        sys.exit(1)
+
+def initialize_openai_client(api_key, base_url):
+    """Initialize the OpenAI client."""
+    return OpenAI(api_key=api_key, base_url=base_url)
+
+def load_model():
+    """Load the pre-trained model for evaluation."""
+    semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
+    return semantic_model
+
+def evaluate_semantic_similarity(expected_response, model_response, semantic_model):
+    """Evaluate semantic similarity using Sentence-BERT."""
+    expected_embedding = semantic_model.encode(expected_response, convert_to_tensor=True)
+    model_embedding = semantic_model.encode(model_response, convert_to_tensor=True)
+    similarity_score = util.pytorch_cos_sim(expected_embedding, model_embedding)
+    return similarity_score.item()
+
+def evaluate_bleu(expected_response, model_response):
+    """Evaluate BLEU score using NLTK's sentence_bleu."""
+    expected_tokens = nltk.word_tokenize(expected_response.lower())
+    model_tokens = nltk.word_tokenize(model_response.lower())
+    smoothing_function = SmoothingFunction().method1
+    bleu_score = sentence_bleu([expected_tokens], model_tokens, smoothing_function=smoothing_function)
+    return bleu_score
+
+async def create_with_retries(client, **kwargs):
+    """Retry mechanism for handling transient server errors asynchronously."""
+    for attempt in range(3):  # Retry up to 3 times
+        try:
+            return client.chat.completions.create(**kwargs)
+        except Exception as e:  # Catch all exceptions since 'InternalServerError' is not defined
+            if attempt < 2:  # Only retry for the first two attempts
+                print(f"Error: {e}, retrying... (Attempt {attempt + 1}/3)")
+                await asyncio.sleep(5)  # Wait for 5 seconds before retrying
+            else:
+                raise Exception("API request failed after retries") from e
+
+async def evaluate_model(data, model_name, client, semantic_model):
+    """Evaluate the model using the provided data."""
+    semantic_scores = []
+    bleu_scores = []
+
+    for entry in data:
+        prompt = entry['prompt']
+        expected_response = entry['response']
+
+        # Create a chat completion using the OpenAI API
+        response = await create_with_retries(
+            client,
+            model=f"PharynxAI/{model_name}",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": prompt}
+            ],
+            temperature=0.7,
+            max_tokens=200,
+            timeout=300
+        )
+
+        model_response = response.choices[0].message.content  # Extract the model's response
+
+        # Evaluate scores
+        semantic_score = evaluate_semantic_similarity(expected_response, model_response, semantic_model)
+        semantic_scores.append(semantic_score)
+
+        bleu_score = evaluate_bleu(expected_response, model_response)
+        bleu_scores.append(bleu_score)
+
+    # Calculate average scores
+    avg_semantic_score = sum(semantic_scores) / len(semantic_scores) if semantic_scores else 0
+    avg_bleu_score = sum(bleu_scores) / len(bleu_scores) if bleu_scores else 0
+
+    logging.info("\nOverall Average Scores:")
+    logging.info(f"Average Semantic Similarity: {avg_semantic_score:.4f}")
+    logging.info(f"Average BLEU Score: {avg_bleu_score:.4f}")
+
+async def main():
+    # Load input data
+    input_data = load_input_data()
+    model_name = input_data["model_name"]
+
+    # Initialize the OpenAI client with the RunPod API key and endpoint URL
+    client = OpenAI(
+        api_key=os.environ.get("RUNPOD_API_KEY"),  # RunPod API key supplied via the environment
+        base_url="https://api.runpod.ai/v2/6vg8gj8ia9vd1w/openai/v1",
+    )
+
+    # Load pre-trained models
+    semantic_model = load_model()
+
+    # Load your dataset (replace with your actual JSON file)
+    with open('output_json.json', 'r') as f:
+        data = json.load(f)
+
+    # Run the evaluation asynchronously
+    await evaluate_model(data, model_name, client, semantic_model)
+
+# Start the event loop
+asyncio.run(main())
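The evaluation script takes its configuration as a single JSON string on the command line (`sys.argv[1]`) and reads prompt/response pairs from `output_json.json`, which this commit does not include. A sketch of the expected dataset shape, with placeholder contents only:

import json

# Each record needs a 'prompt' and a 'response' key; the text here is illustrative only.
records = [
    {"prompt": "Summarise the attached report.", "response": "The report covers quarterly results."},
]
with open("output_json.json", "w") as f:
    json.dump(records, f)

# The script itself would then be invoked as, for example:
#   python3 llm_evaluation.py '{"model_name": "my-finetuned-model"}'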
report.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f25f7c33338f3e3cb56051e865440ca3014a9f4fb083581f279fb1d5ee5a767
+size 1347277
requirements.txt
ADDED
@@ -0,0 +1,19 @@
+azure-storage-blob==12.14.1
+runpod
+torch==2.3.1
+xformers==0.0.27
+transformers>=4.35.0
+accelerate>=0.34.1
+bitsandbytes>=0.43.3
+trl>=0.7.1
+peft>=0.7.0
+triton
+PyPDF2==2.11.2
+PyMuPDF
+pytesseract==0.3.10
+Pillow>=9.0.0
+openai>=1.0.0
+datasets>=2.13.1
+packaging==23.0
+tqdm==4.66.1
+git+https://github.com/unslothai/unsloth.git#egg=unsloth[colab-new]