Upload 13 files
Browse files
- a.py +63 -0
- b.py +66 -0
- clean_tag.py +37 -0
- gpt_caption.py +134 -0
- gpt_eye_tagger.py +117 -0
- python_script.py +17 -0
- st.py +45 -0
- st_eye_plot.py +61 -0
- st_tag_clean.py +44 -0
- stream_lit_plotly.py +39 -0
- stream_new.py +65 -0
- streamlit.py +33 -0
- transfer.py +28 -0
a.py
ADDED
@@ -0,0 +1,63 @@
import streamlit as st
import os
from PIL import Image
from collections import Counter

# Function to list files with given extensions
def list_files(folder_path, extensions):
    files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    return [f for f in files if f.split('.')[-1] in extensions]

# Function to get tag frequencies from text files
def get_tag_frequencies(text_files):
    tag_counter = Counter()
    for text_file in text_files:
        with open(text_file, 'r') as file:
            tags = file.read().split()
            tag_counter.update(tags)
    return tag_counter

# Set up Streamlit app
st.title("Display Images and Corresponding Text Files")

# Define the folder path
folder_path = "/home/caimera-prod/kohya_new_dataset"

# List of allowed image and text extensions
image_extensions = ['jpg', 'jpeg', 'png']
text_extensions = ['txt']

# Get the list of image and text files
files = list_files(folder_path, image_extensions + text_extensions)

# Filter files into images and texts
images = [f for f in files if f.split('.')[-1] in image_extensions]
texts = [f for f in files if f.split('.')[-1] in text_extensions]

# Create a dictionary to map image files to their corresponding text files
file_map = {}
for image in images:
    base_name = os.path.splitext(image)[0]
    corresponding_text = base_name + '.txt'
    if corresponding_text in texts:
        file_map[image] = corresponding_text

# Get tag frequencies
text_files_paths = [os.path.join(folder_path, text) for text in texts]
tag_frequencies = get_tag_frequencies(text_files_paths)

# Display tag frequencies
st.subheader("Tag Frequencies")
for tag, freq in tag_frequencies.most_common():
    st.write(f"{tag}: {freq}")

# Display images and text files side by side
for image_file, text_file in file_map.items():
    col1, col2 = st.columns(2)

    with col1:
        st.image(os.path.join(folder_path, image_file), caption=image_file, use_column_width=True)

    with col2:
        with open(os.path.join(folder_path, text_file), 'r') as file:
            st.text_area(text_file, file.read(), height=300)
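Note: a.py (like the other Streamlit viewers in this upload) is intended to be launched with the Streamlit CLI, for example "streamlit run a.py", rather than executed with plain python, and the hard-coded folder_path must point at an existing local dataset directory.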
b.py
ADDED
@@ -0,0 +1,66 @@
import streamlit as st
import os
from PIL import Image
from collections import Counter
import pandas as pd

# Function to list files with given extensions
def list_files(folder_path, extensions):
    files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    return [f for f in files if f.split('.')[-1] in extensions]

# Function to get tag frequencies from text files
def get_tag_frequencies(text_files):
    tag_counter = Counter()
    for text_file in text_files:
        with open(text_file, 'r') as file:
            tags = file.read().split()
            tag_counter.update(tags)
    return tag_counter

# Set up Streamlit app
st.title("Display Images and Corresponding Text Files")

# Define the folder path
folder_path = "/home/caimera-prod/kohya_new_dataset"

# List of allowed image and text extensions
image_extensions = ['jpg', 'jpeg', 'png']
text_extensions = ['txt']

# Get the list of image and text files
files = list_files(folder_path, image_extensions + text_extensions)

# Filter files into images and texts
images = [f for f in files if f.split('.')[-1] in image_extensions]
texts = [f for f in files if f.split('.')[-1] in text_extensions]

# Create a dictionary to map image files to their corresponding text files
file_map = {}
for image in images:
    base_name = os.path.splitext(image)[0]
    corresponding_text = base_name + '.txt'
    if corresponding_text in texts:
        file_map[image] = corresponding_text

# Get tag frequencies
text_files_paths = [os.path.join(folder_path, text) for text in texts]
tag_frequencies = get_tag_frequencies(text_files_paths)

# Prepare tag frequencies for display
tag_frequencies_data = [{'Tag': tag, 'Frequency': freq} for tag, freq in tag_frequencies.items()]
tag_frequencies_df = pd.DataFrame(tag_frequencies_data)

# Display tag frequencies in a table
st.subheader("Tag Frequencies")
st.table(tag_frequencies_df)

# Display images and text files side by side
for image_file, text_file in file_map.items():
    col1, col2 = st.columns(2)

    with col1:
        st.image(os.path.join(folder_path, image_file), caption=image_file, use_column_width=True)

    with col2:
        with open(os.path.join(folder_path, text_file), 'r') as file:
            st.text_area(text_file, file.read(), height=300)
clean_tag.py
ADDED
@@ -0,0 +1,37 @@
import os

def clean_gym_keywords(text):
    # Split the text by commas
    parts = [part.strip() for part in text.split(',')]

    # Collapse consecutive duplicate "gym" keywords (non-adjacent repeats are kept)
    unique_parts = []
    seen = False
    for part in parts:
        if part.lower() == 'gym':
            if not seen:
                unique_parts.append(part)
                seen = True
        else:
            unique_parts.append(part)
            seen = False

    # Join the parts back into a single string
    cleaned_text = ', '.join(unique_parts)
    return cleaned_text

def process_files(folder_path):
    for filename in os.listdir(folder_path):
        if filename.endswith('.txt'):
            file_path = os.path.join(folder_path, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()

            cleaned_content = clean_gym_keywords(content)

            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(cleaned_content)

# Specify the path to the folder containing your text files
folder_path = '/home/caimera-prod/kohya_new_dataset'
process_files(folder_path)
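For reference, a minimal sketch of what clean_gym_keywords does with a hypothetical caption string (the sample below is not from the dataset): only back-to-back repeats of "gym" are dropped, non-adjacent repeats survive.

# Hypothetical example of clean_gym_keywords behaviour:
#   clean_gym_keywords("gym, gym, young female, gym, dumbbells")
#   -> "gym, young female, gym, dumbbells"   (only the adjacent duplicate is removed)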
gpt_caption.py
ADDED
@@ -0,0 +1,134 @@
import os
import time
import base64
import requests

api_key = "sk-proj-uCiflA45fuchFdjkbNJ7T3BlbkFJF5WiEf2zHkttr7s9kijX"
prompt = """As an AI image tagging expert, please provide precise tags for
these images to enhance the CLIP model's understanding of the content.
Employ succinct keywords or phrases, steering clear of elaborate
sentences and extraneous conjunctions. Prioritize the tags by relevance.
Your tags should capture key elements such as the main subject, setting,
artistic style, composition, image quality, color tone, filter, camera
specifications, and any other tags crucial for the image. When tagging
photos of people, include specific details like gender, nationality,
attire, actions, pose, expressions, accessories, makeup, composition
type, age, etc. For other image categories, apply appropriate and
common descriptive tags as well. Recognize and tag any celebrities,
well-known landmarks or IPs if clearly featured in the image.
Your tags should be accurate, non-duplicative, and within a
20-75 word count range. These tags will be used for image re-creation,
so the closer the resemblance to the original image, the better the
tag quality. Tags should be comma-separated. Exceptional tagging will
be rewarded with $10 per image.
"""
rule_prompt = """
Follow these rules while captioning if the images contain models:
1. For gender identification use Male or Female, e.g.: young female
2. You can add the ethnicity to the gender tag, e.g.: young Indian female, African male
3. Always specify the body composition or model composition. If the body composition has any
discrepancies, be more specific.
4. If the image shows a specific activity, state the particular activity, e.g.: yoga, swimming, gym
5. Do not add objects which are not present in the image.
"""

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def create_openai_query(image_path):
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": (prompt + rule_prompt)
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }

    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    output = response.json()
    print(output)
    return output['choices'][0]['message']['content']


def process_images_in_folder(input_folder, output_folder, resume_from=None):
    os.makedirs(output_folder, exist_ok=True)

    image_files = [
        f for f in os.listdir(input_folder)
        if os.path.isfile(os.path.join(input_folder, f)) and not (f.endswith('.txt') or f.endswith('.npz'))]

    # Track processed images
    processed_log = os.path.join(output_folder, "processed_log.txt")
    processed_images = set()

    # Read processed log if it exists
    if os.path.exists(processed_log):
        with open(processed_log, 'r') as log_file:
            processed_images = {line.strip() for line in log_file.readlines()}

    try:
        for image_file in image_files:
            if resume_from and image_file <= resume_from:
                continue  # Skip images up to and including the resume point

            image_path = os.path.join(input_folder, image_file)

            # Check if already processed
            if image_file in processed_images:
                print(f"Skipping {image_file} as it is already processed.")
                continue

            try:
                processed_output = create_openai_query(image_path)
            except Exception as e:
                print(f"Error processing {image_file}: {str(e)}")
                processed_output = ""  # Write an empty caption and continue with the next image

            # Save processed output to a text file
            output_text_file_path = os.path.join(output_folder, f"{os.path.splitext(image_file)[0]}.txt")
            with open(output_text_file_path, 'w') as f:
                f.write(processed_output)

            # Copy the image to the output folder
            # output_image_path = os.path.join(output_folder, image_file)
            # shutil.copy(image_path, output_image_path)

            # Log processed image
            with open(processed_log, 'a') as log_file:
                log_file.write(f"{image_file}\n")

            print(f"Processed {image_file} and saved result to {output_text_file_path}")

    except Exception as e:
        print(f"Error occurred: {str(e)}. Resuming might not be possible.")
        return

if __name__ == "__main__":
    input_folder = "/home/caimera-prod/Paid-data"
    output_folder = "/home/caimera-prod/Paid-data"

    # Replace with the last successfully processed image filename (without extension) to resume from that point
    resume_from = None  # Example: "image_003"

    process_images_in_folder(input_folder, output_folder, resume_from)
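The API key above is hard-coded in the source, which makes it easy to leak through version control. A minimal sketch, assuming the key is exported as an OPENAI_API_KEY environment variable, of loading it at runtime instead:

import os

api_key = os.environ.get("OPENAI_API_KEY")  # read the key from the environment instead of the source file
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set")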
gpt_eye_tagger.py
ADDED
@@ -0,0 +1,117 @@
import os
import time
import base64
import requests

api_key = "sk-proj-uCiflA45fuchFdjkbNJ7T3BlbkFJF5WiEf2zHkttr7s9kijX"
prompt = """As an AI image tagging expert, please provide one precise tag of the eye for
these images to enhance the CLIP model's understanding of the content.
Employ succinct keywords or phrases, steering clear of elaborate
sentences and extraneous conjunctions.
"""
rule_prompt = """
Follow these rules while captioning the eye:
If the eye is not visible, tag the pose instead.
If the eye is visible, tag it in detail along with ethnicity, for example: round Nepali woman eye
"""

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def create_openai_query(image_path):
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": (prompt + rule_prompt)
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }

    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    output = response.json()
    print(output)
    return output['choices'][0]['message']['content']


def process_images_in_folder(input_folder, output_folder, resume_from=None):
    os.makedirs(output_folder, exist_ok=True)

    image_files = [
        f for f in os.listdir(input_folder)
        if os.path.isfile(os.path.join(input_folder, f)) and not (f.endswith('.txt') or f.endswith('.npz'))]

    # Track processed images
    processed_log = os.path.join(output_folder, "processed_log.txt")
    processed_images = set()

    # Read processed log if it exists
    if os.path.exists(processed_log):
        with open(processed_log, 'r') as log_file:
            processed_images = {line.strip() for line in log_file.readlines()}

    try:
        for image_file in image_files:
            if resume_from and image_file <= resume_from:
                continue  # Skip images up to and including the resume point

            image_path = os.path.join(input_folder, image_file)

            # Check if already processed
            if image_file in processed_images:
                print(f"Skipping {image_file} as it is already processed.")
                continue

            try:
                processed_output = create_openai_query(image_path)
            except Exception as e:
                print(f"Error processing {image_file}: {str(e)}")
                processed_output = ""  # Write an empty caption and continue with the next image

            # Save processed output to a text file
            output_text_file_path = os.path.join(output_folder, f"{os.path.splitext(image_file)[0]}.txt")
            with open(output_text_file_path, 'w') as f:
                f.write(processed_output)

            # Copy the image to the output folder
            # output_image_path = os.path.join(output_folder, image_file)
            # shutil.copy(image_path, output_image_path)

            # Log processed image
            with open(processed_log, 'a') as log_file:
                log_file.write(f"{image_file}\n")

            print(f"Processed {image_file} and saved result to {output_text_file_path}")

    except Exception as e:
        print(f"Error occurred: {str(e)}. Resuming might not be possible.")
        return

if __name__ == "__main__":
    input_folder = "/home/caimera-prod/eye_tagged_data"
    output_folder = "/home/caimera-prod/eye_tagged_data"

    # Replace with the last successfully processed image filename (without extension) to resume from that point
    resume_from = None  # Example: "image_003"

    process_images_in_folder(input_folder, output_folder, resume_from)
python_script.py
ADDED
@@ -0,0 +1,17 @@
import cv2
import numpy as np
import os

image_folder = "masked_loss_training_data/images"
mask_folder = "masked_loss_training_data/conditioning"

image_files = os.listdir(image_folder)
jpg_files = [f for f in image_files if f.endswith('.jpg')]

for file_name in jpg_files:
    img = cv2.imread(os.path.join(image_folder, file_name), cv2.IMREAD_GRAYSCALE)
    _, bw_mask = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    output_file_path = os.path.join(mask_folder, os.path.splitext(file_name)[0] + '.png')
    cv2.imwrite(output_file_path, bw_mask)
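One caveat with python_script.py: cv2.imwrite typically returns False instead of raising when the target directory is missing, so the conditioning folder should exist before the loop runs. A minimal addition, assuming the folder may not exist yet:

os.makedirs(mask_folder, exist_ok=True)  # create the mask output folder if it is missing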
st.py
ADDED
@@ -0,0 +1,45 @@
import streamlit as st
import os
from PIL import Image

# Function to list files with given extensions
def list_files(folder_path, extensions):
    files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    return [f for f in files if f.split('.')[-1] in extensions]

# Set up Streamlit app
st.title("Display Images and Corresponding Text Files")

# Define the folder path
folder_path = "/home/caimera-prod/kohya_new_dataset"

# List of allowed image and text extensions
image_extensions = ['jpg', 'jpeg', 'png']
text_extensions = ['txt']

# Get the list of image and text files
files = list_files(folder_path, image_extensions + text_extensions)

# Filter files into images and texts
images = [f for f in files if f.split('.')[-1] in image_extensions]
texts = [f for f in files if f.split('.')[-1] in text_extensions]

# Create a dictionary to map image files to their corresponding text files
file_map = {}
for image in images:
    base_name = os.path.splitext(image)[0]
    corresponding_text = base_name + '.txt'
    if corresponding_text in texts:
        file_map[image] = corresponding_text

# Display images and text files side by side
for image_file, text_file in file_map.items():
    col1, col2 = st.columns(2)

    with col1:
        st.image(os.path.join(folder_path, image_file), caption=image_file, use_column_width=True)

    with col2:
        with open(os.path.join(folder_path, text_file), 'r') as file:
            st.text_area(text_file, file.read(), height=300)
st_eye_plot.py
ADDED
@@ -0,0 +1,61 @@
import os
from collections import Counter
import streamlit as st
import pandas as pd
import plotly.express as px
from PIL import Image

# Streamlit app title
st.title('Interactive Tag Frequency Visualization')

# Folder path
folder_path = "/home/caimera-prod/eye_tagged_data"

if folder_path:
    # Initialize a Counter to count tag frequency
    tag_counter = Counter()
    file_resolutions = []

    # Iterate through each .txt file in the folder
    for file_name in os.listdir(folder_path):
        if file_name.endswith('.txt'):
            file_path = os.path.join(folder_path, file_name)
            with open(file_path, 'r') as file:
                content = file.read().strip()
                if 'eye' in content.lower():  # Check if 'eye' is in the content
                    tags = content.split(',')
                    # Clean and count each tag
                    tags = [tag.strip().lower() for tag in tags]
                    tag_counter.update(tags)

                    # Find the corresponding image and get its resolution
                    image_name = file_name.replace('.txt', '.jpg')  # Assuming the images are in .jpg format
                    image_path = os.path.join(folder_path, image_name)
                    if os.path.exists(image_path):
                        image = Image.open(image_path)
                        resolution = image.size  # (width, height)
                        file_resolutions.append((file_name, resolution[0], resolution[1]))
                    else:
                        file_resolutions.append((file_name, 'N/A', 'N/A'))  # In case the image is not found

    # Convert the Counter and resolution data to a DataFrame for better display
    tag_data = pd.DataFrame(tag_counter.items(), columns=['Tag', 'Count'])
    tag_data = tag_data.sort_values(by='Count', ascending=False).reset_index(drop=True)

    resolution_data = pd.DataFrame(file_resolutions, columns=['File Name', 'Width', 'Height'])

    # Display the DataFrame as a table in Streamlit
    if not tag_data.empty:
        st.subheader('Tag Frequency Table')
        st.dataframe(tag_data)

        st.subheader('Image Resolutions for Files Containing "eye"')
        st.dataframe(resolution_data)

        # Create an interactive bar chart using Plotly
        st.subheader('Interactive Tag Frequency Bar Chart')
        fig = px.bar(tag_data, x='Tag', y='Count', title='Tag Frequency', labels={'Count': 'Frequency'}, height=600)
        fig.update_layout(xaxis_title='Tags', yaxis_title='Frequency')
        st.plotly_chart(fig)
    else:
        st.write("No tags found in files containing the word 'eye'.")
st_tag_clean.py
ADDED
@@ -0,0 +1,44 @@
import os
from collections import Counter
import streamlit as st
import pandas as pd
import plotly.express as px

# Streamlit app title
# st.title('Interactive Tag Frequency Visualization')

# Folder containing the caption .txt files (hard-coded path)
folder_path = "/home/caimera-prod/kohya_new_dataset"

if folder_path:
    # Initialize a Counter to count tag frequency
    tag_counter = Counter()

    # Iterate through each .txt file in the folder
    for file_name in os.listdir(folder_path):
        if file_name.endswith('.txt'):
            file_path = os.path.join(folder_path, file_name)
            with open(file_path, 'r') as file:
                tags = file.read().strip().split(',')
                # Clean, filter out empty tags, and count each tag
                tags = [tag.strip().lower() for tag in tags if tag.strip()]
                tag_counter.update(tags)

    # Convert the Counter to a DataFrame for better display
    tag_data = pd.DataFrame(tag_counter.items(), columns=['Tag', 'Count'])

    # Filter out any rows with empty tags in the DataFrame (shouldn't be necessary after above filtering)
    tag_data = tag_data[tag_data['Tag'] != '']

    # Sort the DataFrame by count
    tag_data = tag_data.sort_values(by='Count', ascending=False).reset_index(drop=True)

    # Display the DataFrame as a table in Streamlit
    st.subheader('Tag Frequency Table')
    st.dataframe(tag_data)

    # Create an interactive bar chart using Plotly
    st.subheader('Interactive Tag Frequency Bar Chart')
    fig = px.bar(tag_data, x='Tag', y='Count', title='Tag Frequency', labels={'Count': 'Frequency'}, height=600)
    fig.update_layout(xaxis_title='Tags', yaxis_title='Frequency')
    st.plotly_chart(fig)
stream_lit_plotly.py
ADDED
@@ -0,0 +1,39 @@
import os
from collections import Counter
import streamlit as st
import pandas as pd
import plotly.express as px

# Streamlit app title
# st.title('Interactive Tag Frequency Visualization')

# Folder containing the caption .txt files (hard-coded path)
folder_path = "/home/caimera-prod/eye_tagged_data"

if folder_path:
    # Initialize a Counter to count tag frequency
    tag_counter = Counter()

    # Iterate through each .txt file in the folder
    for file_name in os.listdir(folder_path):
        if file_name.endswith('.txt'):
            file_path = os.path.join(folder_path, file_name)
            with open(file_path, 'r') as file:
                tags = file.read().strip().split(',')
                # Clean and count each tag
                tags = [tag.strip().lower() for tag in tags]
                tag_counter.update(tags)

    # Convert the Counter to a DataFrame for better display
    tag_data = pd.DataFrame(tag_counter.items(), columns=['Tag', 'Count'])
    tag_data = tag_data.sort_values(by='Count', ascending=False).reset_index(drop=True)

    # Display the DataFrame as a table in Streamlit
    st.subheader('Tag Frequency Table')
    st.dataframe(tag_data)

    # Create an interactive bar chart using Plotly
    st.subheader('Interactive Tag Frequency Bar Chart')
    fig = px.bar(tag_data, x='Tag', y='Count', title='Tag Frequency', labels={'Count': 'Frequency'}, height=600)
    fig.update_layout(xaxis_title='Tags', yaxis_title='Frequency')
    st.plotly_chart(fig)
stream_new.py
ADDED
@@ -0,0 +1,65 @@
import streamlit as st
import os
from PIL import Image
import math

# Function to list files with given extensions
def list_files(folder_path, extensions):
    files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    return [f for f in files if f.split('.')[-1] in extensions]

# Set up Streamlit app
st.title("Display Images and Corresponding Text Files")

# Define the folder path
folder_path = "/home/caimera-prod/kohya_new_dataset"

# List of allowed image and text extensions
image_extensions = ['jpg', 'jpeg', 'png']
text_extensions = ['txt']

# Get the list of image and text files
files = list_files(folder_path, image_extensions + text_extensions)

# Filter files into images and texts
images = [f for f in files if f.split('.')[-1] in image_extensions]
texts = [f for f in files if f.split('.')[-1] in text_extensions]

# Create a dictionary to map image files to their corresponding text files
file_map = {}
for image in images:
    base_name = os.path.splitext(image)[0]
    corresponding_text = base_name + '.txt'
    if corresponding_text in texts:
        file_map[image] = corresponding_text

# Pagination settings
# Note: the sidebar slider needs total_pages >= 1, so the folder must contain at least one matched image/text pair.
items_per_page = 5
total_items = len(file_map)
total_pages = math.ceil(total_items / items_per_page)
page = st.sidebar.slider('Page', 1, total_pages, 1)

# Calculate the start and end indices for the current page
start_idx = (page - 1) * items_per_page
end_idx = start_idx + items_per_page

# Display images and text files side by side with editing capability
for image_file, text_file in list(file_map.items())[start_idx:end_idx]:
    col1, col2 = st.columns(2)

    with col1:
        st.image(os.path.join(folder_path, image_file), caption=image_file, use_column_width=True)

    with col2:
        text_path = os.path.join(folder_path, text_file)
        with open(text_path, 'r') as file:
            text_content = file.read()

        # Text area for editing text content
        updated_text = st.text_area(text_file, text_content, height=300)

        # Save the edited content back to the file if changes were made
        if st.button(f'Save {text_file}'):
            with open(text_path, 'w') as file:
                file.write(updated_text)
            st.success(f"Changes to {text_file} saved successfully.")
streamlit.py
ADDED
@@ -0,0 +1,33 @@
import os
from collections import Counter
import streamlit as st
import pandas as pd

# Streamlit app title
# st.title('Tag Frequency Table')

# Folder containing the caption .txt files (hard-coded path)
folder_path = "/home/caimera-prod/Paid-data"

if folder_path:
    # Initialize a Counter to count tag frequency
    tag_counter = Counter()

    # Iterate through each .txt file in the folder
    for file_name in os.listdir(folder_path):
        if file_name.endswith('.txt'):
            file_path = os.path.join(folder_path, file_name)
            with open(file_path, 'r') as file:
                tags = file.read().strip().split(',')
                # Clean and count each tag
                tags = [tag.strip().lower() for tag in tags]
                tag_counter.update(tags)

    # Convert the Counter to a DataFrame for better display
    tag_data = pd.DataFrame(tag_counter.items(), columns=['Tag', 'Count'])
    tag_data = tag_data.sort_values(by='Count', ascending=False).reset_index(drop=True)

    # Display the DataFrame as a table in Streamlit
    st.subheader('Tag Frequency Table')
    st.table(tag_data)
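Note: the file name streamlit.py can shadow the streamlit package itself when the script directory ends up first on sys.path (for example when the file is run with plain python), so giving the script a different name is safer; a name like tag_table.py is only an illustrative suggestion, not part of this upload.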
transfer.py
ADDED
@@ -0,0 +1,28 @@
import os
import shutil

def move_non_npz_files(source_folder, destination_folder):
    # Ensure the destination folder exists
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)

    # Iterate over all files in the source folder
    for filename in os.listdir(source_folder):
        # Construct the full file path
        source_file_path = os.path.join(source_folder, filename)

        # Check if the current item is a file (not a directory) and not a .npz file
        if os.path.isfile(source_file_path) and not filename.endswith('.npz'):
            # Construct the destination file path
            destination_file_path = os.path.join(destination_folder, filename)

            # Move the file
            shutil.move(source_file_path, destination_file_path)
            print(f'Moved: {filename} to {destination_folder}')

# Define your source and destination folders
source_folder = '/path/to/source/folder'
destination_folder = '/path/to/destination/folder'

# Move non-.npz files
move_non_npz_files(source_folder, destination_folder)