Anustup committed (verified)
Commit 8d76e98 · 1 Parent(s): 0f27f8d

Upload finetune_utility_scripts.py

Files changed (1)
  1. finetune_utility_scripts.py +195 -0
finetune_utility_scripts.py (added):
# -*- coding: utf-8 -*-
"""finetune-utility-scripts.ipynb

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/14ZbhUPHtNt3EB0XunV_qN6OxWZHyU9wA
"""

!pip install openai

import base64
import requests

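# Note: the API calls below are made with `requests` directly; the `openai`
# package installed above is not actually imported or used by this script.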
api_key = "sk-proj-uCiflA45fuchFdjkbNJ7T3BlbkFJF5WiEf2zHkttr7s9kijX"
prompt = """As an AI image tagging expert, please provide precise tags for
these images to enhance the CLIP model's understanding of the content.
Employ succinct keywords or phrases, steering clear of elaborate
sentences and extraneous conjunctions. Prioritize the tags by relevance.
Your tags should capture key elements such as the main subject, setting,
artistic style, composition, image quality, color tone, filter, camera
specifications, and any other tags crucial for the image. When tagging
photos of people, include specific details like gender, nationality,
attire, actions, pose, expressions, accessories, makeup, composition
type, age, etc. For other image categories, apply appropriate and
common descriptive tags as well. Recognize and tag any celebrities,
well-known landmarks, or IPs if clearly featured in the image.
Your tags should be accurate, non-duplicative, and within a
20-75 word count range. These tags will be used for image re-creation,
so the closer the resemblance to the original image, the better the
tag quality. Tags should be comma-separated. Exceptional tagging will
be rewarded with $10 per image.
"""

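# Helper: read an image from disk and return its contents as a base64 string,
# ready to be embedded in the request as a data URL.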
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

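# Send the tagging prompt plus one base64-encoded image to the Chat Completions
# endpoint (gpt-4o), print the raw JSON response for debugging, and return the
# generated caption text.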
def create_openai_query(image_path):
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }

    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    output = response.json()
    print(output)
    return output['choices'][0]['message']['content']

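# Reset the Pexels_Caption folder on Drive before captions are regenerated.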
!rm -rf "/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption"

import os
os.mkdir("/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption")

import os
import time


# Process every image in a folder, skipping files that are already captioned
# and logging progress so an interrupted run can be resumed
def process_images_in_folder(input_folder, output_folder, resume_from=None):
    os.makedirs(output_folder, exist_ok=True)
    image_files = [f for f in os.listdir(input_folder) if os.path.isfile(os.path.join(input_folder, f))]

    # Track processed images
    processed_log = os.path.join(output_folder, "processed_log.txt")
    processed_images = set()

    # Read processed log if exists
    if os.path.exists(processed_log):
        with open(processed_log, 'r') as log_file:
            processed_images = {line.strip() for line in log_file.readlines()}

    try:
        for image_file in image_files:
            if resume_from and image_file <= resume_from:
                continue  # Skip files up to the resume point

            image_path = os.path.join(input_folder, image_file)

            # Check if already processed
            if image_file in processed_images:
                print(f"Skipping {image_file} as it is already processed.")
                continue

            try:
                processed_output = create_openai_query(image_path)
            except Exception as e:
                print(f"Error processing {image_file}: {str(e)}")
                processed_output = ""  # Write an empty caption for this image and continue

            output_file_path = os.path.join(output_folder, f"{os.path.splitext(image_file)[0]}.txt")

            with open(output_file_path, 'w') as f:
                f.write(processed_output)

            # Log processed image
            with open(processed_log, 'a') as log_file:
                log_file.write(f"{image_file}\n")

            print(f"Processed {image_file} and saved result to {output_file_path}")

    except Exception as e:
        print(f"Error occurred: {str(e)}. Resuming might not be possible.")
        return

if __name__ == "__main__":
    input_folder = "/content/drive/MyDrive/inference-images/inference-images/caimera"
    output_folder = "/content/drive/MyDrive/inference-images/caimera_captions"

    # Replace with the last successfully processed image filename (without extension) to resume from that point
    resume_from = None  # Example: "image_003"

    process_images_in_folder(input_folder, output_folder, resume_from)

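# `time` is imported above but never used; if rate limits become a problem, a
# small retry wrapper around create_openai_query is one way to pace requests.
# This is only a minimal sketch -- the function name, attempt count, and delays
# are assumptions, not part of the original notebook.
def create_openai_query_with_retry(image_path, attempts=3, delay=5.0):
    # Retry the captioning call, doubling the wait after each failed attempt.
    for attempt in range(attempts):
        try:
            return create_openai_query(image_path)
        except Exception as e:
            if attempt == attempts - 1:
                raise
            print(f"Attempt {attempt + 1} failed ({e}); retrying in {delay:.0f}s")
            time.sleep(delay)
            delay *= 2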
import os
import shutil

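# Move image files from one Drive folder into another. Despite its name,
# move_json_files filters on the .png extension.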
def move_json_files(source_folder, destination_folder):
    # Ensure destination folder exists, create if not
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)

    # Iterate through files in source folder
    for file_name in os.listdir(source_folder):
        if file_name.endswith('.png'):
            source_file = os.path.join(source_folder, file_name)
            destination_file = os.path.join(destination_folder, file_name)
            try:
                shutil.move(source_file, destination_file)
                print(f"Moved {file_name} to {destination_folder}")
            except Exception as e:
                print(f"Failed to move {file_name}: {e}")

# Example usage:
source_folder = "/content/drive/MyDrive/inference-images/inference-images/1683/saved"  # Replace with your source folder path
destination_folder = "/content/drive/MyDrive/inference-images/inference-images/caimera"  # Replace with your destination folder path

move_json_files(source_folder, destination_folder)

os.mkdir('/content/drive/MyDrive/kohya_finetune_data')

import os
import shutil

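# Merge several image and caption folders into a single kohya training folder,
# copying files and appending a numeric suffix when a filename already exists
# in the destination.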
def merge_folders(folder_paths, destination_folder):
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)
    for folder_path in folder_paths:
        for filename in os.listdir(folder_path):
            source_file = os.path.join(folder_path, filename)
            destination_file = os.path.join(destination_folder, filename)
            if os.path.exists(destination_file):
                base, extension = os.path.splitext(filename)
                count = 1
                while os.path.exists(os.path.join(destination_folder, f"{base}_{count}{extension}")):
                    count += 1
                destination_file = os.path.join(destination_folder, f"{base}_{count}{extension}")
            shutil.copy2(source_file, destination_file)
            print(f"Copied {source_file} to {destination_file}")

if __name__ == "__main__":
    # Example usage
    folder1 = '/content/drive/MyDrive/inference-images/caimera_captions'
    folder2 = '/content/drive/MyDrive/inference-images/inference-images/caimera'
    folder3 = '/content/drive/MyDrive/Finetune-Dataset/Burst'
    folder4 = '/content/drive/MyDrive/Finetune-Dataset/Burst_Caption'
    folder5 = '/content/drive/MyDrive/Finetune-Dataset/Pexels'
    folder6 = '/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption'
    destination = '/content/drive/MyDrive/kohya_finetune_data'

    folders_to_merge = [folder1, folder2, folder3, folder4, folder5, folder6]
    merge_folders(folders_to_merge, destination)