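"""RunPod serverless worker: downloads a PDF from Azure Blob Storage, runs a
fine-tuning pipeline on it, evaluates the resulting model, and returns the
evaluation results."""
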
import logging
import runpod
import os
import shutil
import uuid
import json
import time
import subprocess
from typing import Dict, Any
from azure.storage.blob import BlobServiceClient

def get_azure_connection_string():
    """Get Azure connection string from environment variable"""
    # Read the connection string from the environment rather than hardcoding
    # account credentials in source. The variable name below is an assumed
    # convention; adjust it to match the deployment.
    conn_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING")
    if not conn_string:
        raise ValueError("Azure Storage connection string not found in environment variables")
    return conn_string
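
# Example of providing the connection string to the worker (illustrative only;
# the account name and key are placeholders, not real values):
#
#   export AZURE_STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net"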

# def upload_file(file_path: str, blob_name: str, container_name: str) -> str:
#     """Upload a file to Azure Blob Storage"""
#     if not os.path.isfile(file_path):
#         raise FileNotFoundError(f"The specified file does not exist: {file_path}")

#     connection_string = get_azure_connection_string()
#     blob_service_client = BlobServiceClient.from_connection_string(connection_string)
#     container_client = blob_service_client.get_container_client(container_name)
    
#     with open(file_path, 'rb') as file:
#         blob_client = container_client.get_blob_client(blob_name)
#         blob_client.upload_blob(file)
#         return blob_client.blob_name

def download_blob(blob_name: str, download_file_path: str, container_name: str) -> None:
    """Download a file from Azure Blob Storage"""
    connection_string = get_azure_connection_string()
    blob_service_client = BlobServiceClient.from_connection_string(connection_string)
    container_client = blob_service_client.get_container_client(container_name)
    blob_client = container_client.get_blob_client(blob_name)

    os.makedirs(os.path.dirname(download_file_path), exist_ok=True)
    
    with open(download_file_path, "wb") as download_file:
        download_stream = blob_client.download_blob()
        download_file.write(download_stream.readall())
    logging.info(f"Blob '{blob_name}' downloaded to '{download_file_path}'")

def clean_directory(directory: str) -> None:
    """Clean up a directory by removing all files and subdirectories"""
    if os.path.exists(directory):
        for filename in os.listdir(directory):
            file_path = os.path.join(directory, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                logging.error(f'Failed to delete {file_path}. Reason: {e}')

def handler(job: Dict[str, Any]) -> Dict[str, Any]:
    """RunPod job handler: validate the job input, download the source PDF,
    and run the fine-tuning and evaluation pipeline."""
    start_time = time.time()
    logging.info("Handler function started")

    # Extract job input and validate
    job_input = job.get('input', {})
    required_fields = ['pdf_file', 'system_prompt', 'model_name', 'max_step', 'learning_rate', 'epochs', 'container']
    missing_fields = [field for field in required_fields if field not in job_input]
    
    if missing_fields:
        return {
            "status": "error",
            "error": f"Missing required fields: {', '.join(missing_fields)}"
        }
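
    # Example job input this handler expects (illustrative values only; the
    # blob name, container, and hyperparameters are placeholders):
    # {
    #     "pdf_file": "documents/source.pdf",
    #     "system_prompt": "You are a helpful assistant.",
    #     "model_name": "my-finetuned-model",
    #     "max_step": 100,
    #     "learning_rate": 2e-4,
    #     "epochs": 3,
    #     "container": "pdf-uploads"
    # }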

    work_dir = os.path.abspath(f"/tmp/work_{str(uuid.uuid4())}")
    
    try:
        # Create working directory
        os.makedirs(work_dir, exist_ok=True)
        logging.info(f"Working directory created: {work_dir}")

        # Download and process PDF
        download_path = os.path.join(work_dir, "Input_PDF.pdf")
        download_blob(job_input['pdf_file'], download_path, job_input['container'])

        # Verify downloaded file exists
        if not os.path.exists(download_path):
            raise FileNotFoundError(f"Downloaded PDF file not found at: {download_path}")

        # Save pipeline input as a JSON file
        pipeline_input_path = os.path.join(work_dir, "pipeline_input.json")
        pipeline_input = {
            "pdf_file": download_path,
            "system_prompt": job_input['system_prompt'],
            "model_name": job_input['model_name'],
            "max_step": job_input['max_step'],
            "learning_rate": job_input['learning_rate'],
            "epochs": job_input['epochs']
        }
        
        with open(pipeline_input_path, 'w') as f:
            json.dump(pipeline_input, f)

        # Run fine-tuning and evaluation
        return run_pipeline_and_evaluate(pipeline_input_path, job_input['model_name'], start_time)

    except Exception as e:
        error_message = f"Job failed after {time.time() - start_time:.2f} seconds: {str(e)}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message
        }

    finally:
        # Clean up working directory
        try:
            clean_directory(work_dir)
            os.rmdir(work_dir)
        except Exception as e:
            logging.error(f"Failed to clean up working directory: {str(e)}")

def run_pipeline_and_evaluate(pipeline_input_path: str, model_name: str, start_time: float) -> Dict[str, Any]:
    """Run Finetuning_Pipeline.py and llm_evaluation.py as subprocesses and
    return a status dictionary with the evaluation results."""
    try:
        # Read the pipeline input file
        with open(pipeline_input_path, 'r') as f:
            pipeline_input = json.load(f)
            
        # Convert the input to a JSON string for passing as an argument
        pipeline_input_str = json.dumps(pipeline_input)
        
        # Run fine-tuning pipeline with JSON string as argument
        logging.info(f"Running pipeline with input: {pipeline_input_str[:100]}...")
        finetuning_result = subprocess.run(
            ['python3', 'Finetuning_Pipeline.py', pipeline_input_str],
            capture_output=True,
            text=True,
            check=True
        )
        logging.info("Fine-tuning completed successfully")

        # Run evaluation
        evaluation_input = json.dumps({"model_name": model_name})
        result = subprocess.run(
            ['python3', 'llm_evaluation.py', evaluation_input],
            capture_output=True,
            text=True,
            check=True
        )
        
        try:
            # Try to parse the evaluation output as JSON
            evaluation_results = json.loads(result.stdout)
        except json.JSONDecodeError:
            # If parsing fails, use the raw output
            evaluation_results = {"raw_output": result.stdout}

        return {
            "status": "success",
            "model_name": f"PharynxAI/{model_name}",
            "processing_time": time.time() - start_time,
            "evaluation_results": evaluation_results
        }

    except subprocess.CalledProcessError as e:
        error_message = f"Pipeline process failed: {e.stderr}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message,
            "stdout": e.stdout,
            "stderr": e.stderr
        }
    except Exception as e:
        error_message = f"Pipeline execution failed: {str(e)}"
        logging.error(error_message)
        return {
            "status": "error",
            "error": error_message
        }

if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    runpod.serverless.start({"handler": handler})
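
# Local smoke test sketch (illustrative): bypass the serverless loop and call
# the handler directly with a job dict shaped like the example near the top of
# handler(), e.g.
#
#     logging.basicConfig(level=logging.INFO)
#     print(handler({"input": {"pdf_file": "documents/source.pdf", ...}}))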