|
import json
import logging
import os
import shutil
import subprocess
import time
import uuid
from typing import Any, Dict

import runpod
from azure.storage.blob import BlobServiceClient
|
|
|
def get_azure_connection_string() -> str:
    """Get the Azure Storage connection string from an environment variable."""
    # Read the secret from the environment instead of hardcoding the account
    # key in source. The variable name below is the conventional one; adjust
    # it to match your deployment.
    conn_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING")
    if not conn_string:
        raise ValueError("Azure Storage connection string not found in environment variables")
    return conn_string


def download_blob(blob_name: str, download_file_path: str, container_name: str) -> None:
    """Download a file from Azure Blob Storage."""
    connection_string = get_azure_connection_string()
    blob_service_client = BlobServiceClient.from_connection_string(connection_string)
    container_client = blob_service_client.get_container_client(container_name)
    blob_client = container_client.get_blob_client(blob_name)

    os.makedirs(os.path.dirname(download_file_path), exist_ok=True)

    with open(download_file_path, "wb") as download_file:
        download_stream = blob_client.download_blob()
        download_file.write(download_stream.readall())
    logging.info(f"Blob '{blob_name}' downloaded to '{download_file_path}'")
|
|
|
def clean_directory(directory: str) -> None:
    """Clean up a directory by removing all files and subdirectories."""
    if os.path.exists(directory):
        for filename in os.listdir(directory):
            file_path = os.path.join(directory, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                logging.error(f'Failed to delete {file_path}. Reason: {e}')
|
|
|
def handler(job: Dict[str, Any]) -> Dict[str, Any]:
    start_time = time.time()
    logging.info("Handler function started")

    # Validate the job payload before doing any work.
    job_input = job.get('input', {})
    required_fields = ['pdf_file', 'system_prompt', 'model_name', 'max_step', 'learning_rate', 'epochs', 'container']
    missing_fields = [field for field in required_fields if field not in job_input]

    if missing_fields:
        return {
            "status": "error",
            "error": f"Missing required fields: {', '.join(missing_fields)}"
        }

    # Use a unique scratch directory so concurrent jobs cannot collide.
    work_dir = os.path.abspath(f"/tmp/work_{uuid.uuid4()}")

    try:
        os.makedirs(work_dir, exist_ok=True)
        logging.info(f"Working directory created: {work_dir}")

        # Fetch the source PDF from Azure Blob Storage.
        download_path = os.path.join(work_dir, "Input_PDF.pdf")
        download_blob(job_input['pdf_file'], download_path, job_input['container'])

        if not os.path.exists(download_path):
            raise FileNotFoundError(f"Downloaded PDF file not found at: {download_path}")

        # Stage the pipeline parameters as a JSON file for the subprocess step.
        pipeline_input_path = os.path.join(work_dir, "pipeline_input.json")
        pipeline_input = {
            "pdf_file": download_path,
            "system_prompt": job_input['system_prompt'],
            "model_name": job_input['model_name'],
            "max_step": job_input['max_step'],
            "learning_rate": job_input['learning_rate'],
            "epochs": job_input['epochs']
        }

        with open(pipeline_input_path, 'w') as f:
            json.dump(pipeline_input, f)

        return run_pipeline_and_evaluate(pipeline_input_path, job_input['model_name'], start_time)

    except Exception as e:
        error_message = f"Job failed after {time.time() - start_time:.2f} seconds: {e}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message
        }

    finally:
        # Always clean up the scratch directory, even on failure.
        try:
            clean_directory(work_dir)
            os.rmdir(work_dir)
        except Exception as e:
            logging.error(f"Failed to clean up working directory: {e}")
|
|
|
def run_pipeline_and_evaluate(pipeline_input_path: str, model_name: str, start_time: float) -> Dict[str, Any]:
    try:
        with open(pipeline_input_path, 'r') as f:
            pipeline_input = json.load(f)

        # The pipeline scripts receive their parameters as a single JSON string argument.
        pipeline_input_str = json.dumps(pipeline_input)

        logging.info(f"Running pipeline with input: {pipeline_input_str[:100]}...")
        subprocess.run(
            ['python3', 'Finetuning_Pipeline.py', pipeline_input_str],
            capture_output=True,
            text=True,
            check=True
        )
        logging.info("Fine-tuning completed successfully")

        evaluation_input = json.dumps({"model_name": model_name})
        result = subprocess.run(
            ['python3', 'llm_evaluation.py', evaluation_input],
            capture_output=True,
            text=True,
            check=True
        )

        # Prefer structured results; fall back to raw stdout if the evaluator
        # did not emit valid JSON.
        try:
            evaluation_results = json.loads(result.stdout)
        except json.JSONDecodeError:
            evaluation_results = {"raw_output": result.stdout}

        return {
            "status": "success",
            "model_name": f"PharynxAI/{model_name}",
            "processing_time": time.time() - start_time,
            "evaluation_results": evaluation_results
        }

    except subprocess.CalledProcessError as e:
        error_message = f"Pipeline process failed: {e.stderr}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message,
            "stdout": e.stdout,
            "stderr": e.stderr
        }
    except Exception as e:
        error_message = f"Pipeline execution failed: {e}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message
        }
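# For reference, Finetuning_Pipeline.py and llm_evaluation.py are invoked with
# their JSON payload as argv[1]. A minimal sketch of that contract (assumed
# from the subprocess calls above, not shown in this file):
#
#   import json, sys
#   params = json.loads(sys.argv[1])  # e.g. {"model_name": "..."}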
|
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    runpod.serverless.start({"handler": handler})
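# Example job payload for local testing (illustrative values only; with the
# runpod SDK this shape can be saved as test_input.json next to this handler):
#
# {
#     "input": {
#         "pdf_file": "documents/sample.pdf",
#         "system_prompt": "You are a helpful assistant.",
#         "model_name": "my-finetuned-model",
#         "max_step": 100,
#         "learning_rate": 2e-4,
#         "epochs": 3,
#         "container": "pdf-uploads"
#     }
# }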