# general_gpt_caption_generator.py
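"""Batch-caption a folder of images with GPT-4o for finetuning datasets.

For each image in the input folder, the script sends the base64-encoded image
together with a tagging prompt to the OpenAI chat completions endpoint, writes
the returned comma-separated tags to a .txt file in the output folder, and
records the filename in processed_log.txt so an interrupted run can resume.
"""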
import os
import base64

import requests

# Read the API key from the environment instead of hard-coding it in the script.
api_key = os.environ.get("OPENAI_API_KEY", "")
prompt = """As an AI image tagging expert, please provide precise tags for
these images to enhance CLIP model's understanding of the content.
Employ succinct keywords or phrases, steering clear of elaborate
sentences and extraneous conjunctions. Prioritize the tags by relevance.
Your tags should capture key elements such as the main subject, setting,
artistic style, composition, image quality, color tone, filter, and camera
specifications, and any other tags crucial for the image. When tagging
photos of people, include specific details like gender, nationality,
attire, actions, pose, expressions, accessories, makeup, composition
type, age, etc. For other image categories, apply appropriate and
common descriptive tags as well. Recognize and tag any celebrities,
well-known landmark or IPs if clearly featured in the image.
Your tags should be accurate, non-duplicative, and within a
20-75 word count range. These tags will use for image re-creation,
so the closer the resemblance to the original image, the better the
tag quality. Tags should be comma-separated. Exceptional tagging will
be rewarded with $10 per image.
"""
rule_prompt = """
Follow this rules while captioning if the images have models:\n
1. For gender identification utilze Male or Female, e.g : young female \n
2. You can add the ethinicity to the gender tag, e.g : young Indian female, african male \n
3. Specify the body composition or model composition always. If the body composition have any
discripencies be more specific.\n
4. If the image have a specific activity state the particular activity e.g: yoga, swimming, gym
5. Do not add objects which are not present in the Image.\n
"""
def encode_image(image_path):
    """Return the base64-encoded contents of the image at image_path."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
def create_openai_query(image_path):
    """Send the image and the tagging prompts to GPT-4o and return the caption text."""
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": (prompt + rule_prompt)
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    # Raise early on HTTP errors so a bad key or rate limit is reported clearly
    # instead of failing later with a KeyError on the missing 'choices' field.
    response.raise_for_status()
    output = response.json()
    return output['choices'][0]['message']['content']
def process_images_in_folder(input_folder, output_folder, resume_from=None):
    """Caption every image in input_folder, writing one .txt file per image to output_folder."""
    os.makedirs(output_folder, exist_ok=True)
    # Sort the filenames so the resume_from comparison below is deterministic.
    image_files = sorted(
        f for f in os.listdir(input_folder)
        if os.path.isfile(os.path.join(input_folder, f)) and not (f.endswith('.txt') or f.endswith('.npz'))
    )
    # Track processed images
    processed_log = os.path.join(output_folder, "processed_log.txt")
    processed_images = set()
    # Read processed log if it exists
    if os.path.exists(processed_log):
        with open(processed_log, 'r') as log_file:
            processed_images = {line.strip() for line in log_file}
    try:
        for image_file in image_files:
            if resume_from and image_file <= resume_from:
                continue  # Skip everything up to and including the resume point
            image_path = os.path.join(input_folder, image_file)
            # Check if already processed
            if image_file in processed_images:
                print(f"Skipping {image_file} as it is already processed.")
                continue
            try:
                # Request the caption from the OpenAI API
                processed_output = create_openai_query(image_path)
            except Exception as e:
                print(f"Error processing {image_file}: {str(e)}")
                continue  # Skip this image so it can be retried on the next run
            # Save processed output to a text file
            output_text_file_path = os.path.join(output_folder, f"{os.path.splitext(image_file)[0]}.txt")
            with open(output_text_file_path, 'w') as f:
                f.write(processed_output)
            # Copy the image to the output folder
            # output_image_path = os.path.join(output_folder, image_file)
            # shutil.copy(image_path, output_image_path)
            # Log processed image
            with open(processed_log, 'a') as log_file:
                log_file.write(f"{image_file}\n")
            print(f"Processed {image_file} and saved result to {output_text_file_path}")
    except Exception as e:
        print(f"Error occurred: {str(e)}. Resuming might not be possible.")
        return
if __name__ == "__main__":
    input_folder = "/home/caimera-prod/Paid-data"
    output_folder = "/home/caimera-prod/Paid-data"
    # To resume, set this to the last successfully processed image filename (including its extension)
    resume_from = None  # Example: "image_003.jpg"
    process_images_in_folder(input_folder, output_folder, resume_from)