Create main.py
main.py
ADDED
from fastapi import FastAPI, Request, BackgroundTasks
import json
import io
from openai import OpenAI
from supabase import create_client
from typing import Dict, Any
import logging
from datetime import datetime


# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI()
client = OpenAI()  # Initialize your OpenAI client with proper credentials (reads OPENAI_API_KEY by default)
supabase = create_client("YOUR_SUPABASE_URL", "YOUR_SUPABASE_KEY")  # Replace the placeholders with your Supabase project URL and key

async def process_batch_job(dataset: Dict[str, Any], batch_job_id: str):
    """
    Background task: build the batch input, submit it to OpenAI, and record the outcome in Supabase.
    """
    try:
        logger.info(f"Starting batch processing for job {batch_job_id}")

        system_prompt = '''
        Your goal is to extract movie categories from movie descriptions, as well as a 1-sentence summary for these movies.
        You will be provided with a movie description, and you will output a json object containing the following information:

        {
            categories: string[] // Array of categories based on the movie description,
            summary: string // 1-sentence summary of the movie based on the movie description
        }

        Categories refer to the genre or type of the movie, like "action", "romance", "comedy", etc. Keep category names simple and use only lower case letters.
        Movies can have several categories, but try to keep it under 3-4. Only mention the categories that are the most obvious based on the description.
        '''
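
        # Illustrative only (not from the source): for an action-comedy description,
        # a well-formed reply would look like
        #   {"categories": ["action", "comedy"], "summary": "A retired spy teams up with a rookie cop to foil a heist."}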

        openai_tasks = []
        for ds in dataset.get('data', []):
            imdb_id = ds.get('imdb_id')
            description = ds.get('Description')
            task = {
                "custom_id": f"task-{imdb_id}",
                "method": "POST",
                "url": "/v1/chat/completions",
                "body": {
                    "model": "gpt-4o-mini",
                    "temperature": 0.1,
                    "response_format": {
                        "type": "json_object"
                    },
                    "messages": [
                        {
                            "role": "system",
                            "content": system_prompt
                        },
                        {
                            "role": "user",
                            "content": description
                        }
                    ]
                }
            }
            openai_tasks.append(task)
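
        # Each task serializes to one JSONL line, roughly (id made up for illustration):
        #   {"custom_id": "task-tt0000001", "method": "POST", "url": "/v1/chat/completions", "body": {...}}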

        # Build the JSONL batch input file in memory
        json_obj = io.BytesIO()
        for obj in openai_tasks:
            json_obj.write((json.dumps(obj) + '\n').encode('utf-8'))
        json_obj.seek(0)  # rewind before uploading, or the buffer is read from the end and arrives empty

        # Upload as a named .jsonl file for the Batch API
        batch_file = client.files.create(
            file=("batch_tasks.jsonl", json_obj),
            purpose="batch"
        )

        # Create the batch job (runs asynchronously on OpenAI's side)
        batch_job = client.batches.create(
            input_file_id=batch_file.id,
            endpoint="/v1/chat/completions",
            completion_window="24h"
        )
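
        # Sketch, not in the original file: batch_job.id (the OpenAI batch id) could be
        # persisted with the record below so a separate worker can poll for results, e.g.:
        #   status = client.batches.retrieve(batch_job.id)
        #   if status.status == "completed":
        #       results = client.files.content(status.output_file_id).content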

        # Mark the job as submitted in Supabase (the batch itself may take up to 24h)
        supabase.table("batch_processing_details").update({
            "batch_job_status": True,
            "completed_at": datetime.utcnow().isoformat()
        }).match({"batch_job_id": batch_job_id}).execute()

        logger.info(f"Batch job {batch_job_id} submitted successfully")

    except Exception as e:
        logger.error(f"Error processing batch job {batch_job_id}: {str(e)}")
        # Record the failure and the error message
        supabase.table("batch_processing_details").update({
            "batch_job_status": False,
            "error": str(e),
            "completed_at": datetime.utcnow().isoformat()
        }).eq("batch_job_id", batch_job_id).execute()

@app.post("/test/v1")
async def testv1(request: Request, background_tasks: BackgroundTasks):
    try:
        dataset = await request.json()

        # Create the initial batch job record
        save_data = {
            "batch_job_id": f"batch_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}",
            "batch_job_status": False,
            "created_at": datetime.utcnow().isoformat()
        }

        response = (
            supabase.table("batch_processing_details")
            .insert(save_data)
            .execute()
        )

        # Schedule the heavy lifting so the request can return immediately
        background_tasks.add_task(process_batch_job, dataset, save_data["batch_job_id"])

        return {"data": "Batch job is scheduled!", "batch_job_id": save_data["batch_job_id"]}

    except Exception as e:
        return {"error": str(e)}
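
# --- Usage sketch (assumptions, not part of the original file) ---
# Run the app with uvicorn, then POST a payload whose shape mirrors the keys read
# in process_batch_job ("data", "imdb_id", "Description"); the values are made up:
#
#   uvicorn main:app --reload
#
#   import httpx
#   payload = {"data": [{"imdb_id": "tt0000001",
#                        "Description": "A retired spy teams up with a rookie cop to foil a heist."}]}
#   r = httpx.post("http://localhost:8000/test/v1", json=payload)
#   print(r.json())  # {'data': 'Batch job is scheduled!', 'batch_job_id': 'batch_...'}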