# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/transformer.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Transformer modules."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_
from .conv import Conv
from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch
__all__ = (
"TransformerEncoderLayer",
"TransformerLayer",
"TransformerBlock",
"MLPBlock",
"LayerNorm2d",
"AIFI",
"DeformableTransformerDecoder",
"DeformableTransformerDecoderLayer",
"MSDeformAttn",
"MLP",
)
class TransformerEncoderLayer(nn.Module):
"""Defines a single layer of the transformer encoder."""
def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
"""Initialize the TransformerEncoderLayer with specified parameters."""
super().__init__()
from ...utils.torch_utils import TORCH_1_9
if not TORCH_1_9:
raise ModuleNotFoundError(
"TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
)
self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
# Implementation of Feedforward model
self.fc1 = nn.Linear(c1, cm)
self.fc2 = nn.Linear(cm, c1)
self.norm1 = nn.LayerNorm(c1)
self.norm2 = nn.LayerNorm(c1)
self.dropout = nn.Dropout(dropout)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.act = act
self.normalize_before = normalize_before
@staticmethod
def with_pos_embed(tensor, pos=None):
"""Add position embeddings to the tensor if provided."""
return tensor if pos is None else tensor + pos
def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
"""Performs forward pass with post-normalization."""
q = k = self.with_pos_embed(src, pos)
src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
src = src + self.dropout2(src2)
return self.norm2(src)
def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
"""Performs forward pass with pre-normalization."""
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
return src + self.dropout2(src2)
def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
"""Forward propagates the input through the encoder module."""
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class AIFI(TransformerEncoderLayer):
"""Defines the AIFI transformer layer."""
def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
"""Initialize the AIFI instance with specified parameters."""
super().__init__(c1, cm, num_heads, dropout, act, normalize_before)
def forward(self, x):
"""Forward pass for the AIFI transformer layer."""
c, h, w = x.shape[1:]
pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
# Flatten [B, C, H, W] to [B, HxW, C]
x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous()
@staticmethod
def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0):
"""Builds 2D sine-cosine position embedding."""
assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
grid_w = torch.arange(w, dtype=torch.float32)
grid_h = torch.arange(h, dtype=torch.float32)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
omega = 1.0 / (temperature**omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], 1)[None]
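
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# Minimal shape check for AIFI, assuming a hypothetical 256-channel 20x20 feature map.
def _example_aifi():  # pragma: no cover
    """Run AIFI on a dummy [B, C, H, W] feature map and confirm the shape is preserved."""
    layer = AIFI(256, cm=1024, num_heads=8)
    x = torch.randn(2, 256, 20, 20)
    assert layer(x).shape == x.shape  # tokens are flattened internally, then restored to B, C, H, W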
class TransformerLayer(nn.Module):
"""Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""
def __init__(self, c, num_heads):
"""Initializes a self-attention mechanism using linear transformations and multi-head attention."""
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
"""Apply a transformer block to the input x and return the output."""
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
return self.fc2(self.fc1(x)) + x
class TransformerBlock(nn.Module):
"""Vision Transformer https://arxiv.org/abs/2010.11929."""
def __init__(self, c1, c2, num_heads, num_layers):
"""Initialize a Transformer module with position embedding and specified number of heads and layers."""
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x):
"""Forward propagates the input through the bottleneck module."""
if self.conv is not None:
x = self.conv(x)
        b, _, h, w = x.shape
        p = x.flatten(2).permute(2, 0, 1)
        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, h, w)
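
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# TransformerBlock optionally changes channels with a Conv, then runs per-pixel tokens
# through stacked TransformerLayers; the dimensions below are hypothetical.
def _example_transformer_block():  # pragma: no cover
    """Check that TransformerBlock maps [B, c1, H, W] to [B, c2, H, W]."""
    block = TransformerBlock(c1=64, c2=128, num_heads=4, num_layers=2)
    x = torch.randn(2, 64, 16, 16)
    assert block(x).shape == (2, 128, 16, 16)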
class MLPBlock(nn.Module):
"""Implements a single block of a multi-layer perceptron."""
def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
"""Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function."""
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass for the MLPBlock."""
return self.lin2(self.act(self.lin1(x)))
class MLP(nn.Module):
"""Implements a simple multi-layer perceptron (also called FFN)."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
"""Initialize the MLP with specified input, hidden, output dimensions and number of layers."""
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
"""Forward pass for the entire MLP."""
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
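
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# A 3-layer MLP as typically used for a DETR-style box head: 256-d queries in, 4 box
# parameters out. The dimensions are hypothetical and chosen only for demonstration.
def _example_mlp():  # pragma: no cover
    """Check the MLP's ReLU-on-hidden / linear-on-last layering via the output shape."""
    head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    queries = torch.randn(2, 300, 256)  # [bs, num_queries, C]
    assert head(queries).shape == (2, 300, 4)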
class LayerNorm2d(nn.Module):
"""
2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.
Original implementations in
https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
and
https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.
"""
def __init__(self, num_channels, eps=1e-6):
"""Initialize LayerNorm2d with the given parameters."""
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
"""Perform forward pass for 2D layer normalization."""
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
return self.weight[:, None, None] * x + self.bias[:, None, None]
class MSDeformAttn(nn.Module):
"""
Multi-Scale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
"""
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""Initialize MSDeformAttn with the given parameters."""
super().__init__()
if d_model % n_heads != 0:
raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
        # Each head's dim (d_model / n_heads) is best set to a power of 2 for an efficient CUDA implementation
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
"""Reset module parameters."""
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.n_heads, 1, 1, 2)
.repeat(1, self.n_levels, self.n_points, 1)
)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def forward(self, query, refer_bbox, value, value_shapes, value_mask=None):
"""
Perform forward pass for multiscale deformable attention.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
Args:
query (torch.Tensor): [bs, query_length, C]
refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
bottom-right (1, 1), including padding area
value (torch.Tensor): [bs, value_length, C]
value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
            value_mask (Tensor): [bs, value_length], True for padding elements whose values are zeroed out,
                False for valid elements
Returns:
output (Tensor): [bs, Length_{query}, C]
"""
bs, len_q = query.shape[:2]
len_v = value.shape[1]
assert sum(s[0] * s[1] for s in value_shapes) == len_v
value = self.value_proj(value)
if value_mask is not None:
value = value.masked_fill(value_mask[..., None], float(0))
value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
num_points = refer_bbox.shape[-1]
if num_points == 2:
offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
sampling_locations = refer_bbox[:, :, None, :, None, :] + add
elif num_points == 4:
add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
return self.output_proj(output)
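
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# MSDeformAttn samples n_points per head per level around normalized reference points.
# The shapes below are hypothetical: two feature levels (32x32, 16x16) and 100 queries.
def _example_msdeform_attn():  # pragma: no cover
    """Check the forward contract of MSDeformAttn with 2-coordinate reference points."""
    attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4)
    shapes = [(32, 32), (16, 16)]
    value = torch.randn(2, 32 * 32 + 16 * 16, 256)  # [bs, value_length, C]
    query = torch.randn(2, 100, 256)  # [bs, query_length, C]
    refer = torch.rand(2, 100, 2, 2)  # [bs, query_length, n_levels, 2] in [0, 1]
    assert attn(query, refer, value, shapes).shape == (2, 100, 256)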
class DeformableTransformerDecoderLayer(nn.Module):
"""
Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
"""
def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4):
"""Initialize the DeformableTransformerDecoderLayer with the given parameters."""
super().__init__()
# Self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# Cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# FFN
self.linear1 = nn.Linear(d_model, d_ffn)
self.act = act
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
"""Add positional embeddings to the input tensor, if provided."""
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
"""Perform forward pass through the Feed-Forward Network part of the layer."""
tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
return self.norm3(tgt)
def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
"""Perform the forward pass through the entire decoder layer."""
# Self attention
q = k = self.with_pos_embed(embed, query_pos)
tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
0
].transpose(0, 1)
embed = embed + self.dropout1(tgt)
embed = self.norm1(embed)
# Cross attention
tgt = self.cross_attn(
self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
)
embed = embed + self.dropout2(tgt)
embed = self.norm2(embed)
# FFN
return self.forward_ffn(embed)
class DeformableTransformerDecoder(nn.Module):
"""
Implementation of Deformable Transformer Decoder based on PaddleDetection.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
"""
def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
"""Initialize the DeformableTransformerDecoder with the given parameters."""
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx
def forward(
self,
embed, # decoder embeddings
refer_bbox, # anchor
feats, # image features
shapes, # feature shapes
bbox_head,
score_head,
pos_mlp,
attn_mask=None,
padding_mask=None,
):
"""Perform the forward pass through the entire decoder."""
output = embed
dec_bboxes = []
dec_cls = []
last_refined_bbox = None
refer_bbox = refer_bbox.sigmoid()
for i, layer in enumerate(self.layers):
output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))
bbox = bbox_head[i](output)
refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))
if self.training:
dec_cls.append(score_head[i](output))
if i == 0:
dec_bboxes.append(refined_bbox)
else:
dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
elif i == self.eval_idx:
dec_cls.append(score_head[i](output))
dec_bboxes.append(refined_bbox)
break
last_refined_bbox = refined_bbox
refer_bbox = refined_bbox.detach() if self.training else refined_bbox
return torch.stack(dec_bboxes), torch.stack(dec_cls)
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/attention.py
import torch
from torch import nn, Tensor, LongTensor
from typing import Tuple, Optional
__all__ = [
"GAM_Attention",
]
# GAM Attention Start
def channel_shuffle(x, groups=2):
    """Shuffle channels via reshape -> transpose -> flatten, as in ShuffleNet."""
    B, C, H, W = x.size()
    out = x.view(B, groups, C // groups, H, W).permute(0, 2, 1, 3, 4).contiguous()
    return out.view(B, C, H, W)
# Global Attention Mechanism
# https://arxiv.org/abs/2112.05561
class GAM_Attention(nn.Module):
    """Global Attention Mechanism (GAM): channel attention followed by (optionally grouped) spatial attention."""

    def __init__(self, c1, c2, group=True, rate=4):
        """Initialize GAM with input/output channels, grouped-conv toggle, and channel-reduction rate."""
        super().__init__()
self.channel_attention = nn.Sequential(
nn.Linear(c1, int(c1 / rate)),
nn.ReLU(inplace=True),
nn.Linear(int(c1 / rate), c1),
)
self.spatial_attention = nn.Sequential(
nn.Conv2d(c1, c1 // rate, kernel_size=7, padding=3, groups=rate)
if group
else nn.Conv2d(c1, int(c1 / rate), kernel_size=7, padding=3),
nn.BatchNorm2d(int(c1 / rate)),
nn.ReLU(inplace=True),
nn.Conv2d(c1 // rate, c2, kernel_size=7, padding=3, groups=rate)
if group
else nn.Conv2d(int(c1 / rate), c2, kernel_size=7, padding=3),
nn.BatchNorm2d(c2),
)
    def forward(self, x):
        """Apply channel attention, then shuffled spatial attention, to the input tensor."""
b, c, h, w = x.shape
x_permute = x.permute(0, 2, 3, 1).view(b, -1, c)
x_att_permute = self.channel_attention(x_permute).view(b, h, w, c)
x_channel_att = x_att_permute.permute(0, 3, 1, 2)
x = x * x_channel_att
x_spatial_att = self.spatial_attention(x).sigmoid()
x_spatial_att = channel_shuffle(x_spatial_att, 4) # last shuffle
out = x * x_spatial_att
return out
# GAM Attention End
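
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# GAM preserves the spatial resolution and, with c1 == c2, the channel count; the sizes
# below are hypothetical but must stay divisible by the grouped-conv group count.
def _example_gam():  # pragma: no cover
    """Check that GAM_Attention preserves the input shape when c1 == c2."""
    gam = GAM_Attention(c1=64, c2=64, group=True, rate=4)
    x = torch.randn(2, 64, 32, 32)
    assert gam(x).shape == x.shape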
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/conv.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Convolution modules."""
import math
import numpy as np
import torch
import torch.nn as nn
__all__ = (
"Conv",
"Conv2",
"LightConv",
"DWConv",
"DWConvTranspose2d",
"ConvTranspose",
"Focus",
"GhostConv",
"ChannelAttention",
"SpatialAttention",
"CBAM",
"Concat",
"RepConv",
)
def autopad(k, p=None, d=1): # kernel, padding, dilation
"""Pad to 'same' shape outputs."""
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
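
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# autopad returns the padding that keeps the output spatial size equal to the input
# size at stride 1; dilation first inflates the effective kernel size.
def _example_autopad():  # pragma: no cover
    """Spot-check the 'same' padding arithmetic."""
    assert autopad(3) == 1  # 3x3 kernel -> pad 1
    assert autopad(5) == 2  # 5x5 kernel -> pad 2
    assert autopad(3, None, 2) == 2  # dilation 2 makes a 3x3 act like a 5x5 -> pad 2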
class Conv(nn.Module):
"""Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)."""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
"""Initialize Conv layer with given arguments including activation."""
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
"""Apply convolution, batch normalization and activation to input tensor."""
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
"""Perform transposed convolution of 2D data."""
return self.act(self.conv(x))
class Conv2(Conv):
"""Simplified RepConv module with Conv fusing."""
def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True):
"""Initialize Conv layer with given arguments including activation."""
super().__init__(c1, c2, k, s, p, g=g, d=d, act=act)
self.cv2 = nn.Conv2d(c1, c2, 1, s, autopad(1, p, d), groups=g, dilation=d, bias=False) # add 1x1 conv
def forward(self, x):
"""Apply convolution, batch normalization and activation to input tensor."""
return self.act(self.bn(self.conv(x) + self.cv2(x)))
def forward_fuse(self, x):
"""Apply fused convolution, batch normalization and activation to input tensor."""
return self.act(self.bn(self.conv(x)))
def fuse_convs(self):
"""Fuse parallel convolutions."""
w = torch.zeros_like(self.conv.weight.data)
i = [x // 2 for x in w.shape[2:]]
w[:, :, i[0] : i[0] + 1, i[1] : i[1] + 1] = self.cv2.weight.data.clone()
self.conv.weight.data += w
self.__delattr__("cv2")
self.forward = self.forward_fuse
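
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# Conv2 fusion folds the parallel 1x1 branch into the center tap of the 3x3 kernel, so
# outputs before and after fusing should match in eval mode (up to float error).
def _example_conv2_fuse():  # pragma: no cover
    """Check numerical equivalence of Conv2 before and after fuse_convs()."""
    m = Conv2(16, 32, k=3).eval()  # eval so BatchNorm uses fixed running stats
    x = torch.randn(1, 16, 8, 8)
    y_ref = m(x)
    m.fuse_convs()
    assert torch.allclose(y_ref, m(x), atol=1e-5)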
class LightConv(nn.Module):
"""
Light convolution with args(ch_in, ch_out, kernel).
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
"""
def __init__(self, c1, c2, k=1, act=nn.ReLU()):
"""Initialize Conv layer with given arguments including activation."""
super().__init__()
self.conv1 = Conv(c1, c2, 1, act=False)
self.conv2 = DWConv(c2, c2, k, act=act)
def forward(self, x):
"""Apply 2 convolutions to input tensor."""
return self.conv2(self.conv1(x))
class DWConv(Conv):
"""Depth-wise convolution."""
def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
"""Initialize Depth-wise convolution with given parameters."""
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
class DWConvTranspose2d(nn.ConvTranspose2d):
"""Depth-wise transpose convolution."""
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
"""Initialize DWConvTranspose2d class with given parameters."""
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
class ConvTranspose(nn.Module):
"""Convolution transpose 2d layer."""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True):
"""Initialize ConvTranspose2d layer with batch normalization and activation function."""
super().__init__()
self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn)
self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity()
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
"""Applies transposed convolutions, batch normalization and activation to input."""
return self.act(self.bn(self.conv_transpose(x)))
def forward_fuse(self, x):
"""Applies activation and convolution transpose operation to input."""
return self.act(self.conv_transpose(x))
class Focus(nn.Module):
"""Focus wh information into c-space."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
"""Initializes Focus object with user defined channel, convolution, padding, group and activation values."""
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
# self.contract = Contract(gain=2)
def forward(self, x):
"""
Applies convolution to concatenated tensor and returns the output.
Input shape is (b,c,w,h) and output shape is (b,4c,w/2,h/2).
"""
return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
# return self.conv(self.contract(x))
class GhostConv(nn.Module):
"""Ghost Convolution https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
"""Initializes the GhostConv object with input channels, output channels, kernel size, stride, groups and
activation.
"""
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
def forward(self, x):
"""Forward propagation through a Ghost Bottleneck layer with skip connection."""
y = self.cv1(x)
return torch.cat((y, self.cv2(y)), 1)
class RepConv(nn.Module):
"""
RepConv is a basic rep-style block, including training and deploy status.
This module is used in RT-DETR.
Based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
"""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=3, s=1, p=1, g=1, d=1, act=True, bn=False, deploy=False):
"""Initializes Light Convolution layer with inputs, outputs & optional activation function."""
super().__init__()
assert k == 3 and p == 1
self.g = g
self.c1 = c1
self.c2 = c2
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
self.bn = nn.BatchNorm2d(num_features=c1) if bn and c2 == c1 and s == 1 else None
self.conv1 = Conv(c1, c2, k, s, p=p, g=g, act=False)
self.conv2 = Conv(c1, c2, 1, s, p=(p - k // 2), g=g, act=False)
def forward_fuse(self, x):
"""Forward process."""
return self.act(self.conv(x))
def forward(self, x):
"""Forward process."""
id_out = 0 if self.bn is None else self.bn(x)
return self.act(self.conv1(x) + self.conv2(x) + id_out)
def get_equivalent_kernel_bias(self):
"""Returns equivalent kernel and bias by adding 3x3 kernel, 1x1 kernel and identity kernel with their biases."""
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
kernelid, biasid = self._fuse_bn_tensor(self.bn)
return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
def _pad_1x1_to_3x3_tensor(self, kernel1x1):
"""Pads a 1x1 tensor to a 3x3 tensor."""
if kernel1x1 is None:
return 0
else:
return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
def _fuse_bn_tensor(self, branch):
"""Generates appropriate kernels and biases for convolution by fusing branches of the neural network."""
if branch is None:
return 0, 0
if isinstance(branch, Conv):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
elif isinstance(branch, nn.BatchNorm2d):
if not hasattr(self, "id_tensor"):
input_dim = self.c1 // self.g
kernel_value = np.zeros((self.c1, input_dim, 3, 3), dtype=np.float32)
for i in range(self.c1):
kernel_value[i, i % input_dim, 1, 1] = 1
self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
def fuse_convs(self):
"""Combines two convolution layers into a single layer and removes unused attributes from the class."""
if hasattr(self, "conv"):
return
kernel, bias = self.get_equivalent_kernel_bias()
self.conv = nn.Conv2d(
in_channels=self.conv1.conv.in_channels,
out_channels=self.conv1.conv.out_channels,
kernel_size=self.conv1.conv.kernel_size,
stride=self.conv1.conv.stride,
padding=self.conv1.conv.padding,
dilation=self.conv1.conv.dilation,
groups=self.conv1.conv.groups,
bias=True,
).requires_grad_(False)
self.conv.weight.data = kernel
self.conv.bias.data = bias
for para in self.parameters():
para.detach_()
self.__delattr__("conv1")
self.__delattr__("conv2")
if hasattr(self, "nm"):
self.__delattr__("nm")
if hasattr(self, "bn"):
self.__delattr__("bn")
if hasattr(self, "id_tensor"):
self.__delattr__("id_tensor")
class ChannelAttention(nn.Module):
"""Channel-attention module https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet."""
def __init__(self, channels: int) -> None:
"""Initializes the class and sets the basic configurations and instance variables required."""
super().__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
self.act = nn.Sigmoid()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Applies forward pass using activation on convolutions of the input, optionally using batch normalization."""
return x * self.act(self.fc(self.pool(x)))
class SpatialAttention(nn.Module):
"""Spatial-attention module."""
def __init__(self, kernel_size=7):
"""Initialize Spatial-attention module with kernel size argument."""
super().__init__()
assert kernel_size in (3, 7), "kernel size must be 3 or 7"
padding = 3 if kernel_size == 7 else 1
self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.act = nn.Sigmoid()
def forward(self, x):
"""Apply channel and spatial attention on input for feature recalibration."""
return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1)))
class CBAM(nn.Module):
"""Convolutional Block Attention Module."""
def __init__(self, c1, kernel_size=7):
"""Initialize CBAM with given input channel (c1) and kernel size."""
super().__init__()
self.channel_attention = ChannelAttention(c1)
self.spatial_attention = SpatialAttention(kernel_size)
def forward(self, x):
"""Applies the forward pass through C1 module."""
return self.spatial_attention(self.channel_attention(x))
class Concat(nn.Module):
"""Concatenate a list of tensors along dimension."""
def __init__(self, dimension=1):
"""Concatenates a list of tensors along a specified dimension."""
super().__init__()
self.d = dimension
def forward(self, x):
"""Forward pass for the YOLOv8 mask Proto module."""
return torch.cat(x, self.d)
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/block.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Block modules."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .conv import Conv, DWConv, GhostConv, LightConv, RepConv
from .transformer import TransformerBlock
__all__ = (
"DFL",
"HGBlock",
"HGStem",
"SPP",
"SPPF",
"C1",
"C2",
"C3",
"C2f",
"C2fAttn",
"ImagePoolingAttn",
"ContrastiveHead",
"BNContrastiveHead",
"C3x",
"C3TR",
"C3Ghost",
"GhostBottleneck",
"Bottleneck",
"BottleneckCSP",
"Proto",
"RepC3",
"ResNetLayer",
)
class DFL(nn.Module):
"""
Integral module of Distribution Focal Loss (DFL).
Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
"""
def __init__(self, c1=16):
"""Initialize a convolutional layer with a given number of input channels."""
super().__init__()
self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
x = torch.arange(c1, dtype=torch.float)
self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
self.c1 = c1
def forward(self, x):
"""Applies a transformer layer on input tensor 'x' and returns a tensor."""
b, c, a = x.shape # batch, channels, anchors
return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)
# return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a)
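
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# DFL turns reg_max-bin distribution logits per box side into expected distances: a
# softmax over the bins, then a frozen conv whose weights are arange(reg_max).
def _example_dfl():  # pragma: no cover
    """Check that DFL reduces [b, 4 * c1, a] logits to [b, 4, a] distances."""
    dfl = DFL(16)
    x = torch.randn(2, 64, 100)  # [batch, 4 * reg_max, anchors]
    assert dfl(x).shape == (2, 4, 100)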
class Proto(nn.Module):
"""YOLOv8 mask Proto module for segmentation models."""
def __init__(self, c1, c_=256, c2=32):
"""
Initializes the YOLOv8 mask Proto module with specified number of protos and masks.
Input arguments are ch_in, number of protos, number of masks.
"""
super().__init__()
self.cv1 = Conv(c1, c_, k=3)
self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True) # nn.Upsample(scale_factor=2, mode='nearest')
self.cv2 = Conv(c_, c_, k=3)
self.cv3 = Conv(c_, c2)
def forward(self, x):
"""Performs a forward pass through layers using an upsampled input image."""
return self.cv3(self.cv2(self.upsample(self.cv1(x))))
class HGStem(nn.Module):
"""
StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
"""
def __init__(self, c1, cm, c2):
"""Initialize the SPP layer with input/output channels and specified kernel sizes for max pooling."""
super().__init__()
self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU())
self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU())
self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU())
self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU())
self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU())
self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True)
def forward(self, x):
"""Forward pass of a PPHGNetV2 backbone layer."""
x = self.stem1(x)
x = F.pad(x, [0, 1, 0, 1])
x2 = self.stem2a(x)
x2 = F.pad(x2, [0, 1, 0, 1])
x2 = self.stem2b(x2)
x1 = self.pool(x)
x = torch.cat([x1, x2], dim=1)
x = self.stem3(x)
x = self.stem4(x)
return x
class HGBlock(nn.Module):
"""
HG_Block of PPHGNetV2 with 2 convolutions and LightConv.
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
"""
def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
"""Initializes a CSP Bottleneck with 1 convolution using specified input and output channels."""
super().__init__()
block = LightConv if lightconv else Conv
self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act) # squeeze conv
self.ec = Conv(c2 // 2, c2, 1, 1, act=act) # excitation conv
self.add = shortcut and c1 == c2
def forward(self, x):
"""Forward pass of a PPHGNetV2 backbone layer."""
y = [x]
y.extend(m(y[-1]) for m in self.m)
y = self.ec(self.sc(torch.cat(y, 1)))
return y + x if self.add else y
class SPP(nn.Module):
"""Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729."""
def __init__(self, c1, c2, k=(5, 9, 13)):
"""Initialize the SPP layer with input/output channels and pooling kernel sizes."""
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
"""Forward pass of the SPP layer, performing spatial pyramid pooling."""
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
"""Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher."""
def __init__(self, c1, c2, k=5):
"""
Initializes the SPPF layer with given input/output channels and kernel size.
This module is equivalent to SPP(k=(5, 9, 13)).
"""
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
"""Forward pass through Ghost Convolution block."""
x = self.cv1(x)
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
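
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# Three chained 5x5 max pools give receptive fields equivalent to SPP's 5/9/13 kernels
# at lower cost; spatial size is preserved by the stride-1, padded pooling.
def _example_sppf():  # pragma: no cover
    """Check that SPPF preserves spatial dimensions."""
    sppf = SPPF(64, 64, k=5)
    x = torch.randn(1, 64, 20, 20)
    assert sppf(x).shape == (1, 64, 20, 20)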
class C1(nn.Module):
"""CSP Bottleneck with 1 convolution."""
def __init__(self, c1, c2, n=1):
"""Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number."""
super().__init__()
self.cv1 = Conv(c1, c2, 1, 1)
self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))
def forward(self, x):
"""Applies cross-convolutions to input in the C3 module."""
y = self.cv1(x)
return self.m(y) + y
class C2(nn.Module):
"""CSP Bottleneck with 2 convolutions."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes the CSP Bottleneck with 2 convolutions module with arguments ch_in, ch_out, number, shortcut,
groups, expansion.
"""
super().__init__()
self.c = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, 2 * self.c, 1, 1)
self.cv2 = Conv(2 * self.c, c2, 1) # optional act=FReLU(c2)
# self.attention = ChannelAttention(2 * self.c) # or SpatialAttention()
self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))
def forward(self, x):
"""Forward pass through the CSP bottleneck with 2 convolutions."""
a, b = self.cv1(x).chunk(2, 1)
return self.cv2(torch.cat((self.m(a), b), 1))
class C2f(nn.Module):
"""Faster Implementation of CSP Bottleneck with 2 convolutions."""
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
"""Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
expansion.
"""
super().__init__()
self.c = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, 2 * self.c, 1, 1)
self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
def forward(self, x):
"""Forward pass through C2f layer."""
y = list(self.cv1(x).chunk(2, 1))
y.extend(m(y[-1]) for m in self.m)
return self.cv2(torch.cat(y, 1))
def forward_split(self, x):
"""Forward pass using split() instead of chunk()."""
y = list(self.cv1(x).split((self.c, self.c), 1))
y.extend(m(y[-1]) for m in self.m)
return self.cv2(torch.cat(y, 1))
class C3(nn.Module):
"""CSP Bottleneck with 3 convolutions."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values."""
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n)))
def forward(self, x):
"""Forward pass through the CSP bottleneck with 2 convolutions."""
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
class C3x(C3):
"""C3 module with cross-convolutions."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initialize C3TR instance and set default parameters."""
super().__init__(c1, c2, n, shortcut, g, e)
self.c_ = int(c2 * e)
self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))
class RepC3(nn.Module):
"""Rep C3."""
def __init__(self, c1, c2, n=3, e=1.0):
"""Initialize CSP Bottleneck with a single convolution using input channels, output channels, and number."""
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c2, 1, 1)
self.cv2 = Conv(c1, c2, 1, 1)
self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()
def forward(self, x):
"""Forward pass of RT-DETR neck layer."""
return self.cv3(self.m(self.cv1(x)) + self.cv2(x))
class C3TR(C3):
"""C3 module with TransformerBlock()."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initialize C3Ghost module with GhostBottleneck()."""
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3Ghost(C3):
"""C3 module with GhostBottleneck()."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initialize 'SPP' module with various pooling sizes for spatial pyramid pooling."""
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
class GhostBottleneck(nn.Module):
"""Ghost Bottleneck https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=3, s=1):
"""Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride."""
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(
GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False), # pw-linear
)
self.shortcut = (
nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
)
def forward(self, x):
"""Applies skip connection and concatenation to input tensor."""
return self.conv(x) + self.shortcut(x)
class Bottleneck(nn.Module):
"""Standard bottleneck."""
def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
"""Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and
expansion.
"""
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, k[0], 1)
self.cv2 = Conv(c_, c2, k[1], 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
"""'forward()' applies the YOLO FPN to input data."""
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
"""CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
"""Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion."""
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
"""Applies a CSP bottleneck with 3 convolutions."""
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
class ResNetBlock(nn.Module):
"""ResNet block with standard convolution layers."""
def __init__(self, c1, c2, s=1, e=4):
"""Initialize convolution with given parameters."""
super().__init__()
c3 = e * c2
self.cv1 = Conv(c1, c2, k=1, s=1, act=True)
self.cv2 = Conv(c2, c2, k=3, s=s, p=1, act=True)
self.cv3 = Conv(c2, c3, k=1, act=False)
self.shortcut = nn.Sequential(Conv(c1, c3, k=1, s=s, act=False)) if s != 1 or c1 != c3 else nn.Identity()
def forward(self, x):
"""Forward pass through the ResNet block."""
return F.relu(self.cv3(self.cv2(self.cv1(x))) + self.shortcut(x))
class ResNetLayer(nn.Module):
"""ResNet layer with multiple ResNet blocks."""
def __init__(self, c1, c2, s=1, is_first=False, n=1, e=4):
"""Initializes the ResNetLayer given arguments."""
super().__init__()
self.is_first = is_first
if self.is_first:
self.layer = nn.Sequential(
Conv(c1, c2, k=7, s=2, p=3, act=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
else:
blocks = [ResNetBlock(c1, c2, s, e=e)]
blocks.extend([ResNetBlock(e * c2, c2, 1, e=e) for _ in range(n - 1)])
self.layer = nn.Sequential(*blocks)
def forward(self, x):
"""Forward pass through the ResNet layer."""
return self.layer(x)
class MaxSigmoidAttnBlock(nn.Module):
"""Max Sigmoid attention block."""
def __init__(self, c1, c2, nh=1, ec=128, gc=512, scale=False):
"""Initializes MaxSigmoidAttnBlock with specified arguments."""
super().__init__()
self.nh = nh
self.hc = c2 // nh
self.ec = Conv(c1, ec, k=1, act=False) if c1 != ec else None
self.gl = nn.Linear(gc, ec)
self.bias = nn.Parameter(torch.zeros(nh))
self.proj_conv = Conv(c1, c2, k=3, s=1, act=False)
self.scale = nn.Parameter(torch.ones(1, nh, 1, 1)) if scale else 1.0
def forward(self, x, guide):
"""Forward process."""
bs, _, h, w = x.shape
guide = self.gl(guide)
guide = guide.view(bs, -1, self.nh, self.hc)
embed = self.ec(x) if self.ec is not None else x
embed = embed.view(bs, self.nh, self.hc, h, w)
aw = torch.einsum("bmchw,bnmc->bmhwn", embed, guide)
aw = aw.max(dim=-1)[0]
aw = aw / (self.hc**0.5)
aw = aw + self.bias[None, :, None, None]
aw = aw.sigmoid() * self.scale
x = self.proj_conv(x)
x = x.view(bs, self.nh, -1, h, w)
x = x * aw.unsqueeze(2)
return x.view(bs, -1, h, w)
class C2fAttn(nn.Module):
"""C2f module with an additional attn module."""
def __init__(self, c1, c2, n=1, ec=128, nh=1, gc=512, shortcut=False, g=1, e=0.5):
"""Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
expansion.
"""
super().__init__()
self.c = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, 2 * self.c, 1, 1)
self.cv2 = Conv((3 + n) * self.c, c2, 1) # optional act=FReLU(c2)
self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
self.attn = MaxSigmoidAttnBlock(self.c, self.c, gc=gc, ec=ec, nh=nh)
def forward(self, x, guide):
"""Forward pass through C2f layer."""
y = list(self.cv1(x).chunk(2, 1))
y.extend(m(y[-1]) for m in self.m)
y.append(self.attn(y[-1], guide))
return self.cv2(torch.cat(y, 1))
def forward_split(self, x, guide):
"""Forward pass using split() instead of chunk()."""
y = list(self.cv1(x).split((self.c, self.c), 1))
y.extend(m(y[-1]) for m in self.m)
y.append(self.attn(y[-1], guide))
return self.cv2(torch.cat(y, 1))
class ImagePoolingAttn(nn.Module):
"""ImagePoolingAttn: Enhance the text embeddings with image-aware information."""
def __init__(self, ec=256, ch=(), ct=512, nh=8, k=3, scale=False):
"""Initializes ImagePoolingAttn with specified arguments."""
super().__init__()
nf = len(ch)
self.query = nn.Sequential(nn.LayerNorm(ct), nn.Linear(ct, ec))
self.key = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
self.value = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
self.proj = nn.Linear(ec, ct)
self.scale = nn.Parameter(torch.tensor([0.0]), requires_grad=True) if scale else 1.0
self.projections = nn.ModuleList([nn.Conv2d(in_channels, ec, kernel_size=1) for in_channels in ch])
self.im_pools = nn.ModuleList([nn.AdaptiveMaxPool2d((k, k)) for _ in range(nf)])
self.ec = ec
self.nh = nh
self.nf = nf
self.hc = ec // nh
self.k = k
def forward(self, x, text):
"""Executes attention mechanism on input tensor x and guide tensor."""
bs = x[0].shape[0]
assert len(x) == self.nf
num_patches = self.k**2
x = [pool(proj(x)).view(bs, -1, num_patches) for (x, proj, pool) in zip(x, self.projections, self.im_pools)]
x = torch.cat(x, dim=-1).transpose(1, 2)
q = self.query(text)
k = self.key(x)
v = self.value(x)
# q = q.reshape(1, text.shape[1], self.nh, self.hc).repeat(bs, 1, 1, 1)
q = q.reshape(bs, -1, self.nh, self.hc)
k = k.reshape(bs, -1, self.nh, self.hc)
v = v.reshape(bs, -1, self.nh, self.hc)
aw = torch.einsum("bnmc,bkmc->bmnk", q, k)
aw = aw / (self.hc**0.5)
aw = F.softmax(aw, dim=-1)
x = torch.einsum("bmnk,bkmc->bnmc", aw, v)
x = self.proj(x.reshape(bs, -1, self.ec))
return x * self.scale + text
class ContrastiveHead(nn.Module):
"""Contrastive Head for YOLO-World compute the region-text scores according to the similarity between image and text
features.
"""
def __init__(self):
"""Initializes ContrastiveHead with specified region-text similarity parameters."""
super().__init__()
self.bias = nn.Parameter(torch.zeros([]))
self.logit_scale = nn.Parameter(torch.ones([]) * torch.tensor(1 / 0.07).log())
def forward(self, x, w):
"""Forward function of contrastive learning."""
x = F.normalize(x, dim=1, p=2)
w = F.normalize(w, dim=-1, p=2)
x = torch.einsum("bchw,bkc->bkhw", x, w)
return x * self.logit_scale.exp() + self.bias
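
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# ContrastiveHead scores each spatial location against K text embeddings; the feature
# and embedding sizes below are hypothetical.
def _example_contrastive_head():  # pragma: no cover
    """Check the region-text score map shape: [B, K, H, W]."""
    head = ContrastiveHead()
    x = torch.randn(2, 256, 20, 20)  # image features [B, C, H, W]
    w = torch.randn(2, 32, 256)  # text embeddings [B, K, C]
    assert head(x, w).shape == (2, 32, 20, 20)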
class BNContrastiveHead(nn.Module):
"""
Batch Norm Contrastive Head for YOLO-World using batch norm instead of l2-normalization.
Args:
embed_dims (int): Embed dimensions of text and image features.
norm_cfg (dict): Normalization parameters.
"""
def __init__(self, embed_dims: int):
"""Initialize ContrastiveHead with region-text similarity parameters."""
super().__init__()
self.norm = nn.BatchNorm2d(embed_dims)
self.bias = nn.Parameter(torch.zeros([]))
        # using -1.0 as the initial logit scale is more stable
self.logit_scale = nn.Parameter(-1.0 * torch.ones([]))
def forward(self, x, w):
"""Forward function of contrastive learning."""
x = self.norm(x)
w = F.normalize(w, dim=-1, p=2)
x = torch.einsum("bchw,bkc->bkhw", x, w)
return x * self.logit_scale.exp() + self.bias
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/utils.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Module utils."""
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import uniform_
__all__ = "multi_scale_deformable_attn_pytorch", "inverse_sigmoid"
def _get_clones(module, n):
"""Create a list of cloned modules from the given module."""
return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
def bias_init_with_prob(prior_prob=0.01):
"""Initialize conv/fc bias value according to a given probability value."""
return float(-np.log((1 - prior_prob) / prior_prob)) # return bias_init
def linear_init(module):
"""Initialize the weights and biases of a linear module."""
bound = 1 / math.sqrt(module.weight.shape[0])
uniform_(module.weight, -bound, bound)
if hasattr(module, "bias") and module.bias is not None:
uniform_(module.bias, -bound, bound)
def inverse_sigmoid(x, eps=1e-5):
"""Calculate the inverse sigmoid function for a tensor."""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
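
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# inverse_sigmoid is the logit function with clamping for numerical safety, so
# sigmoid(inverse_sigmoid(p)) should round-trip p for values away from 0 and 1.
def _example_inverse_sigmoid():  # pragma: no cover
    """Check the sigmoid/inverse_sigmoid round trip."""
    p = torch.tensor([0.1, 0.5, 0.9])
    assert torch.allclose(torch.sigmoid(inverse_sigmoid(p)), p, atol=1e-4)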
def multi_scale_deformable_attn_pytorch(
value: torch.Tensor,
value_spatial_shapes: torch.Tensor,
sampling_locations: torch.Tensor,
attention_weights: torch.Tensor,
) -> torch.Tensor:
"""
Multi-scale deformable attention.
https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
"""
bs, _, num_heads, embed_dims = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for level, (H_, W_) in enumerate(value_spatial_shapes):
# bs, H_*W_, num_heads, embed_dims ->
# bs, H_*W_, num_heads*embed_dims ->
# bs, num_heads*embed_dims, H_*W_ ->
# bs*num_heads, embed_dims, H_, W_
value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
# bs, num_queries, num_heads, num_points, 2 ->
# bs, num_heads, num_queries, num_points, 2 ->
# bs*num_heads, num_queries, num_points, 2
sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
# bs*num_heads, embed_dims, num_queries, num_points
sampling_value_l_ = F.grid_sample(
value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
)
sampling_value_list.append(sampling_value_l_)
# (bs, num_queries, num_heads, num_levels, num_points) ->
# (bs, num_heads, num_queries, num_levels, num_points) ->
# (bs, num_heads, 1, num_queries, num_levels*num_points)
attention_weights = attention_weights.transpose(1, 2).reshape(
bs * num_heads, 1, num_queries, num_levels * num_points
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(bs, num_heads * embed_dims, num_queries)
)
return output.transpose(1, 2).contiguous()
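
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# Direct call with hypothetical shapes: 2 levels, 8 heads, 4 points, 50 queries.
# Sampling locations are in [0, 1] and mapped to grid_sample's [-1, 1] internally.
def _example_msda():  # pragma: no cover
    """Check the output shape contract of multi-scale deformable attention."""
    bs, heads, dims, queries = 2, 8, 32, 50
    shapes = [(16, 16), (8, 8)]
    value = torch.randn(bs, 16 * 16 + 8 * 8, heads, dims)
    locs = torch.rand(bs, queries, heads, len(shapes), 4, 2)
    weights = torch.rand(bs, queries, heads, len(shapes), 4)
    weights = weights / weights.sum((-2, -1), keepdim=True)  # sum to 1 over levels*points
    out = multi_scale_deformable_attn_pytorch(value, shapes, locs, weights)
    assert out.shape == (bs, queries, heads * dims)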
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/__init__.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Ultralytics modules.
Example:
Visualize a module with Netron.
```python
from ultralytics.nn.modules import *
import torch
import os
x = torch.ones(1, 128, 40, 40)
m = Conv(128, 128)
f = f'{m._get_name()}.onnx'
torch.onnx.export(m, x, f)
os.system(f'onnxsim {f} {f} && open {f}')
```
"""
from .block import (
C1,
C2,
C3,
C3TR,
DFL,
SPP,
SPPF,
Bottleneck,
BottleneckCSP,
C2f,
C2fAttn,
ImagePoolingAttn,
C3Ghost,
C3x,
GhostBottleneck,
HGBlock,
HGStem,
Proto,
RepC3,
ResNetLayer,
ContrastiveHead,
BNContrastiveHead,
)
from .conv import (
CBAM,
ChannelAttention,
Concat,
Conv,
Conv2,
ConvTranspose,
DWConv,
DWConvTranspose2d,
Focus,
GhostConv,
LightConv,
RepConv,
SpatialAttention,
)
from .head import OBB, Classify, Detect, Pose, RTDETRDecoder, Segment, WorldDetect
from .transformer import (
AIFI,
MLP,
DeformableTransformerDecoder,
DeformableTransformerDecoderLayer,
LayerNorm2d,
MLPBlock,
MSDeformAttn,
TransformerBlock,
TransformerEncoderLayer,
TransformerLayer,
)
from .attention import (
    GAM_Attention,
)
__all__ = (
"Conv",
"Conv2",
"LightConv",
"RepConv",
"DWConv",
"DWConvTranspose2d",
"ConvTranspose",
"Focus",
"GhostConv",
"ChannelAttention",
"SpatialAttention",
"CBAM",
"Concat",
"TransformerLayer",
"TransformerBlock",
"MLPBlock",
"LayerNorm2d",
"DFL",
"HGBlock",
"HGStem",
"SPP",
"SPPF",
"C1",
"C2",
"C3",
"C2f",
"C2fAttn",
"C3x",
"C3TR",
"C3Ghost",
"GhostBottleneck",
"Bottleneck",
"BottleneckCSP",
"Proto",
"Detect",
"Segment",
"Pose",
"Classify",
"TransformerEncoderLayer",
"RepC3",
"RTDETRDecoder",
"AIFI",
"DeformableTransformerDecoder",
"DeformableTransformerDecoderLayer",
"MSDeformAttn",
"MLP",
"ResNetLayer",
"OBB",
"WorldDetect",
"ImagePoolingAttn",
"ContrastiveHead",
"BNContrastiveHead",
"GAM_Attention"
)
# File: arojsubedi_Improved-YOLOv8s/ultralytics/nn/modules/head.py
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Model head modules."""
import math
import torch
import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_
from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
from .block import DFL, Proto, ContrastiveHead, BNContrastiveHead
from .conv import Conv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init
__all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder"
class Detect(nn.Module):
"""YOLOv8 Detect head for detection models."""
dynamic = False # force grid reconstruction
export = False # export mode
shape = None
anchors = torch.empty(0) # init
strides = torch.empty(0) # init
def __init__(self, nc=80, ch=()):
"""Initializes the YOLOv8 detection layer with specified number of classes and channels."""
super().__init__()
self.nc = nc # number of classes
self.nl = len(ch) # number of detection layers
self.reg_max = 16 # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
self.no = nc + self.reg_max * 4 # number of outputs per anchor
self.stride = torch.zeros(self.nl) # strides computed during build
c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100)) # channels
self.cv2 = nn.ModuleList(
nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
)
self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()
def forward(self, x):
"""Concatenates and returns predicted bounding boxes and class probabilities."""
for i in range(self.nl):
x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
if self.training: # Training path
return x
# Inference path
shape = x[0].shape # BCHW
x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
if self.dynamic or self.shape != shape:
self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
self.shape = shape
if self.export and self.format in ("saved_model", "pb", "tflite", "edgetpu", "tfjs"): # avoid TF FlexSplitV ops
box = x_cat[:, : self.reg_max * 4]
cls = x_cat[:, self.reg_max * 4 :]
else:
box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
if self.export and self.format in ("tflite", "edgetpu"):
# Precompute normalization factor to increase numerical stability
# See https://github.com/ultralytics/ultralytics/issues/7371
grid_h = shape[2]
grid_w = shape[3]
grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
norm = self.strides / (self.stride[0] * grid_size)
dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
else:
dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
y = torch.cat((dbox, cls.sigmoid()), 1)
return y if self.export else (y, x)
def bias_init(self):
"""Initialize Detect() biases, WARNING: requires stride availability."""
m = self # self.model[-1] # Detect() module
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
# ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency
for a, b, s in zip(m.cv2, m.cv3, m.stride): # from
a[-1].bias.data[:] = 1.0 # box
b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img)
def decode_bboxes(self, bboxes, anchors):
"""Decode bounding boxes."""
return dist2bbox(bboxes, anchors, xywh=True, dim=1)
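
# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# In training mode Detect returns the raw per-level maps with no = 4 * reg_max + nc
# channels; strides and channel sizes below are hypothetical YOLOv8s-like values.
def _example_detect_train():  # pragma: no cover
    """Check training-mode Detect output channels across three feature levels."""
    head = Detect(nc=80, ch=(128, 256, 512))
    head.stride = torch.tensor([8.0, 16.0, 32.0])
    feats = [torch.randn(1, c, s, s) for c, s in zip((128, 256, 512), (80, 40, 20))]
    out = head.train()(feats)
    assert [o.shape[1] for o in out] == [head.no] * 3  # 4 * 16 + 80 = 144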
class Segment(Detect):
"""YOLOv8 Segment head for segmentation models."""
def __init__(self, nc=80, nm=32, npr=256, ch=()):
"""Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
super().__init__(nc, ch)
self.nm = nm # number of masks
self.npr = npr # number of protos
self.proto = Proto(ch[0], self.npr, self.nm) # protos
self.detect = Detect.forward
c4 = max(ch[0] // 4, self.nm)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)
def forward(self, x):
"""Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
p = self.proto(x[0]) # mask protos
bs = p.shape[0] # batch size
mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2) # mask coefficients
x = self.detect(self, x)
if self.training:
return x, mc, p
return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))
class OBB(Detect):
"""YOLOv8 OBB detection head for detection with rotation models."""
def __init__(self, nc=80, ne=1, ch=()):
"""Initialize OBB with number of classes `nc` and layer channels `ch`."""
super().__init__(nc, ch)
self.ne = ne # number of extra parameters
self.detect = Detect.forward
c4 = max(ch[0] // 4, self.ne)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)
def forward(self, x):
"""Concatenates and returns predicted bounding boxes and class probabilities."""
bs = x[0].shape[0] # batch size
angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2) # OBB theta logits
# NOTE: set `angle` as an attribute so that `decode_bboxes` could use it.
angle = (angle.sigmoid() - 0.25) * math.pi # [-pi/4, 3pi/4]
# angle = angle.sigmoid() * math.pi / 2 # [0, pi/2]
if not self.training:
self.angle = angle
x = self.detect(self, x)
if self.training:
return x, angle
return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))
def decode_bboxes(self, bboxes, anchors):
"""Decode rotated bounding boxes."""
return dist2rbox(bboxes, self.angle, anchors, dim=1)
class Pose(Detect):
"""YOLOv8 Pose head for keypoints models."""
def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
"""Initialize YOLO network with default parameters and Convolutional Layers."""
super().__init__(nc, ch)
self.kpt_shape = kpt_shape # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
self.nk = kpt_shape[0] * kpt_shape[1] # number of keypoints total
self.detect = Detect.forward
c4 = max(ch[0] // 4, self.nk)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)
def forward(self, x):
"""Perform forward pass through YOLO model and return predictions."""
bs = x[0].shape[0] # batch size
kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1) # (bs, 17*3, h*w)
x = self.detect(self, x)
if self.training:
return x, kpt
pred_kpt = self.kpts_decode(bs, kpt)
return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))
def kpts_decode(self, bs, kpts):
"""Decodes keypoints."""
ndim = self.kpt_shape[1]
if self.export: # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
y = kpts.view(bs, *self.kpt_shape, -1)
a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
if ndim == 3:
a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
return a.view(bs, self.nk, -1)
else:
y = kpts.clone()
if ndim == 3:
y[:, 2::3] = y[:, 2::3].sigmoid() # sigmoid (WARNING: inplace .sigmoid_() Apple MPS bug)
y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
return y
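# --- Illustrative sketch (not part of the original module) ---
# The decode above maps a raw keypoint offset to image coordinates with
# x = (raw * 2 + (anchor - 0.5)) * stride. A numeric spot check with
# hypothetical values:
def _demo_kpt_decode():
    """Decode one raw keypoint coordinate (sketch)."""
    raw, anchor, stride = 0.75, 10.0, 8.0
    return (raw * 2.0 + (anchor - 0.5)) * stride  # (1.5 + 9.5) * 8 = 88.0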
class Classify(nn.Module):
"""YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
"""Initializes YOLOv8 classification head with specified input and output channels, kernel size, stride,
padding, and groups.
"""
super().__init__()
c_ = 1280 # efficientnet_b0 size
self.conv = Conv(c1, c_, k, s, p, g)
self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
self.drop = nn.Dropout(p=0.0, inplace=True)
self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x):
"""Performs a forward pass of the YOLO model on input image data."""
if isinstance(x, list):
x = torch.cat(x, 1)
x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
return x if self.training else x.softmax(1)
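# --- Illustrative sketch (not part of the original module) ---
# Shape flow through the head above: (b, c1, 20, 20) -> conv (b, 1280, 20, 20)
# -> pool + flatten (b, 1280) -> linear (b, c2), with softmax applied in eval
# mode. Hypothetical standalone check:
def _demo_classify_shapes():
    """Run a dummy batch through Classify and return its output shape (sketch)."""
    head = Classify(c1=64, c2=10).eval()
    return head(torch.randn(2, 64, 20, 20)).shape  # torch.Size([2, 10])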
class WorldDetect(Detect):
    """Head for integrating YOLOv8 detection models with semantic understanding from text embeddings."""
def __init__(self, nc=80, embed=512, with_bn=False, ch=()):
"""Initialize YOLOv8 detection layer with nc classes and layer channels ch."""
super().__init__(nc, ch)
c3 = max(ch[0], min(self.nc, 100))
self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)
def forward(self, x, text):
"""Concatenates and returns predicted bounding boxes and class probabilities."""
for i in range(self.nl):
x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), text)), 1)
if self.training:
return x
# Inference path
shape = x[0].shape # BCHW
x_cat = torch.cat([xi.view(shape[0], self.nc + self.reg_max * 4, -1) for xi in x], 2)
if self.dynamic or self.shape != shape:
self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
self.shape = shape
if self.export and self.format in ("saved_model", "pb", "tflite", "edgetpu", "tfjs"): # avoid TF FlexSplitV ops
box = x_cat[:, : self.reg_max * 4]
cls = x_cat[:, self.reg_max * 4 :]
else:
box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
if self.export and self.format in ("tflite", "edgetpu"):
# Precompute normalization factor to increase numerical stability
# See https://github.com/ultralytics/ultralytics/issues/7371
grid_h = shape[2]
grid_w = shape[3]
grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
norm = self.strides / (self.stride[0] * grid_size)
dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
else:
dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
y = torch.cat((dbox, cls.sigmoid()), 1)
return y if self.export else (y, x)
class RTDETRDecoder(nn.Module):
"""
Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.
This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
Transformer decoder layers to output the final predictions.
"""
export = False # export mode
def __init__(
self,
nc=80,
ch=(512, 1024, 2048),
hd=256, # hidden dim
nq=300, # num queries
ndp=4, # num decoder points
nh=8, # num head
ndl=6, # num decoder layers
d_ffn=1024, # dim of feedforward
dropout=0.0,
act=nn.ReLU(),
eval_idx=-1,
# Training args
nd=100, # num denoising
label_noise_ratio=0.5,
box_noise_scale=1.0,
learnt_init_query=False,
):
"""
Initializes the RTDETRDecoder module with the given parameters.
Args:
nc (int): Number of classes. Default is 80.
ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
hd (int): Dimension of hidden layers. Default is 256.
nq (int): Number of query points. Default is 300.
ndp (int): Number of decoder points. Default is 4.
nh (int): Number of heads in multi-head attention. Default is 8.
ndl (int): Number of decoder layers. Default is 6.
d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
dropout (float): Dropout rate. Default is 0.
act (nn.Module): Activation function. Default is nn.ReLU.
eval_idx (int): Evaluation index. Default is -1.
nd (int): Number of denoising. Default is 100.
label_noise_ratio (float): Label noise ratio. Default is 0.5.
box_noise_scale (float): Box noise scale. Default is 1.0.
learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
"""
super().__init__()
self.hidden_dim = hd
self.nhead = nh
self.nl = len(ch) # num level
self.nc = nc
self.num_queries = nq
self.num_decoder_layers = ndl
# Backbone feature projection
self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)
# NOTE: simplified version but it's not consistent with .pt weights.
# self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch)
# Transformer module
decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)
# Denoising part
self.denoising_class_embed = nn.Embedding(nc, hd)
self.num_denoising = nd
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
# Decoder embedding
self.learnt_init_query = learnt_init_query
if learnt_init_query:
self.tgt_embed = nn.Embedding(nq, hd)
self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)
# Encoder head
self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
self.enc_score_head = nn.Linear(hd, nc)
self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)
# Decoder head
self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])
self._reset_parameters()
def forward(self, x, batch=None):
"""Runs the forward pass of the module, returning bounding box and classification scores for the input."""
from ultralytics.models.utils.ops import get_cdn_group
# Input projection and embedding
feats, shapes = self._get_encoder_input(x)
# Prepare denoising training
dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
batch,
self.nc,
self.num_queries,
self.denoising_class_embed.weight,
self.num_denoising,
self.label_noise_ratio,
self.box_noise_scale,
self.training,
)
embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)
# Decoder
dec_bboxes, dec_scores = self.decoder(
embed,
refer_bbox,
feats,
shapes,
self.dec_bbox_head,
self.dec_score_head,
self.query_pos_head,
attn_mask=attn_mask,
)
x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
if self.training:
return x
# (bs, 300, 4+nc)
y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
return y if self.export else (y, x)
def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device="cpu", eps=1e-2):
"""Generates anchor bounding boxes for given shapes with specific grid size and validates them."""
anchors = []
for i, (h, w) in enumerate(shapes):
sy = torch.arange(end=h, dtype=dtype, device=device)
sx = torch.arange(end=w, dtype=dtype, device=device)
grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
grid_xy = torch.stack([grid_x, grid_y], -1) # (h, w, 2)
valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH # (1, h, w, 2)
wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)
anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4)) # (1, h*w, 4)
anchors = torch.cat(anchors, 1) # (1, h*w*nl, 4)
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True) # 1, h*w*nl, 1
anchors = torch.log(anchors / (1 - anchors))
anchors = anchors.masked_fill(~valid_mask, float("inf"))
return anchors, valid_mask
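    # --- Illustrative sketch (not part of the original class) ---
    # Anchors above are stored as logits via the inverse sigmoid log(a / (1 - a)),
    # so that sigmoid() on the decoder's refined boxes recovers normalized (0, 1)
    # coordinates. A quick round-trip check with hypothetical values:
    @staticmethod
    def _demo_inverse_sigmoid_roundtrip():
        """Check that sigmoid(log(a / (1 - a))) recovers a (sketch)."""
        a = torch.tensor([0.25, 0.5, 0.75])
        return torch.allclose(torch.log(a / (1 - a)).sigmoid(), a)  # True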
def _get_encoder_input(self, x):
"""Processes and returns encoder inputs by getting projection features from input and concatenating them."""
# Get projection features
x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
# Get encoder inputs
feats = []
shapes = []
for feat in x:
h, w = feat.shape[2:]
# [b, c, h, w] -> [b, h*w, c]
feats.append(feat.flatten(2).permute(0, 2, 1))
# [nl, 2]
shapes.append([h, w])
# [b, h*w, c]
feats = torch.cat(feats, 1)
return feats, shapes
def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None):
"""Generates and prepares the input required for the decoder from the provided features and shapes."""
bs = feats.shape[0]
# Prepare input for decoder
anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
features = self.enc_output(valid_mask * feats) # bs, h*w, 256
enc_outputs_scores = self.enc_score_head(features) # (bs, h*w, nc)
# Query selection
# (bs, num_queries)
topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
# (bs, num_queries)
batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)
# (bs, num_queries, 256)
top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
# (bs, num_queries, 4)
top_k_anchors = anchors[:, topk_ind].view(bs, self.num_queries, -1)
# Dynamic anchors + static content
refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors
enc_bboxes = refer_bbox.sigmoid()
if dn_bbox is not None:
refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)
embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
if self.training:
refer_bbox = refer_bbox.detach()
if not self.learnt_init_query:
embeddings = embeddings.detach()
if dn_embed is not None:
embeddings = torch.cat([dn_embed, embeddings], 1)
return embeddings, refer_bbox, enc_bboxes, enc_scores
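    # --- Illustrative sketch (not part of the original class) ---
    # Query selection above keeps the `num_queries` anchors whose best class
    # score is highest, then gathers their features with flattened indices.
    # The dummy shapes below (bs=2, 8 anchors, 3 classes, 4 queries) are hypothetical.
    @staticmethod
    def _demo_topk_query_selection():
        """Gather top-k anchor features by best class score (sketch)."""
        bs, n, nc, nq = 2, 8, 3, 4
        scores, feats = torch.randn(bs, n, nc), torch.randn(bs, n, 16)
        topk_ind = torch.topk(scores.max(-1).values, nq, dim=1).indices.view(-1)  # (bs*nq,)
        batch_ind = torch.arange(bs).unsqueeze(-1).repeat(1, nq).view(-1)  # (bs*nq,)
        return feats[batch_ind, topk_ind].view(bs, nq, -1)  # (bs, nq, 16)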
# TODO
def _reset_parameters(self):
"""Initializes or resets the parameters of the model's various components with predefined weights and biases."""
# Class and bbox head init
bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
# NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
# linear_init(self.enc_score_head)
constant_(self.enc_score_head.bias, bias_cls)
constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
# linear_init(cls_)
constant_(cls_.bias, bias_cls)
constant_(reg_.layers[-1].weight, 0.0)
constant_(reg_.layers[-1].bias, 0.0)
linear_init(self.enc_output[0])
xavier_uniform_(self.enc_output[0].weight)
if self.learnt_init_query:
xavier_uniform_(self.tgt_embed.weight)
xavier_uniform_(self.query_pos_head.layers[0].weight)
xavier_uniform_(self.query_pos_head.layers[1].weight)
for layer in self.input_proj:
xavier_uniform_(layer[0].weight)

# ==================== ultralytics/hub/auth.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
import requests
from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab
API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
class Auth:
"""
Manages authentication processes including API key handling, cookie-based authentication, and header generation.
The class supports different methods of authentication:
1. Directly using an API key.
2. Authenticating using browser cookies (specifically in Google Colab).
3. Prompting the user to enter an API key.
Attributes:
id_token (str or bool): Token used for identity verification, initialized as False.
api_key (str or bool): API key for authentication, initialized as False.
model_key (bool): Placeholder for model key, initialized as False.
"""
id_token = api_key = model_key = False
def __init__(self, api_key="", verbose=False):
"""
Initialize the Auth class with an optional API key.
Args:
api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id
"""
# Split the input API key in case it contains a combined key_model and keep only the API key part
api_key = api_key.split("_")[0]
# Set API key attribute as value passed or SETTINGS API key if none passed
self.api_key = api_key or SETTINGS.get("api_key", "")
# If an API key is provided
if self.api_key:
# If the provided API key matches the API key in the SETTINGS
if self.api_key == SETTINGS.get("api_key"):
# Log that the user is already logged in
if verbose:
LOGGER.info(f"{PREFIX}Authenticated ✅")
return
else:
# Attempt to authenticate with the provided API key
success = self.authenticate()
# If the API key is not provided and the environment is a Google Colab notebook
elif is_colab():
# Attempt to authenticate using browser cookies
success = self.auth_with_cookies()
else:
# Request an API key
success = self.request_api_key()
# Update SETTINGS with the new API key after successful authentication
if success:
SETTINGS.update({"api_key": self.api_key})
# Log that the new login was successful
if verbose:
LOGGER.info(f"{PREFIX}New authentication successful ✅")
elif verbose:
LOGGER.info(f"{PREFIX}Retrieve API key from {API_KEY_URL}")
def request_api_key(self, max_attempts=3):
"""
Prompt the user to input their API key.
Returns the model ID.
"""
import getpass
for attempts in range(max_attempts):
LOGGER.info(f"{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}")
input_key = getpass.getpass(f"Enter API key from {API_KEY_URL} ")
self.api_key = input_key.split("_")[0] # remove model id if present
if self.authenticate():
return True
        raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌"))
def authenticate(self) -> bool:
"""
Attempt to authenticate with the server using either id_token or API key.
Returns:
(bool): True if authentication is successful, False otherwise.
"""
try:
if header := self.get_auth_header():
r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header)
if not r.json().get("success", False):
raise ConnectionError("Unable to authenticate.")
return True
raise ConnectionError("User has not authenticated locally.")
except ConnectionError:
self.id_token = self.api_key = False # reset invalid
LOGGER.warning(f"{PREFIX}Invalid API key ⚠�")
return False
def auth_with_cookies(self) -> bool:
"""
Attempt to fetch authentication via cookies and set id_token. User must be logged in to HUB and running in a
supported browser.
Returns:
(bool): True if authentication is successful, False otherwise.
"""
if not is_colab():
return False # Currently only works with Colab
try:
authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")
if authn.get("success", False):
self.id_token = authn.get("data", {}).get("idToken", None)
self.authenticate()
return True
raise ConnectionError("Unable to fetch browser authentication details.")
except ConnectionError:
self.id_token = False # reset invalid
return False
def get_auth_header(self):
"""
Get the authentication header for making API requests.
Returns:
(dict): The authentication header if id_token or API key is set, None otherwise.
"""
if self.id_token:
return {"authorization": f"Bearer {self.id_token}"}
elif self.api_key:
return {"x-api-key": self.api_key}
# else returns None
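# --- Illustrative sketch (not part of the original module) ---
# Typical use of the auth header above when calling the HUB API directly; the
# endpoint is the same one `authenticate()` posts to, and the helper name is
# hypothetical.
def _demo_authenticated_request(auth: Auth):
    """Post to the HUB auth endpoint with this Auth instance's header (sketch)."""
    header = auth.get_auth_header()
    return requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header) if header else None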

# ==================== ultralytics/hub/utils.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
import os
import platform
import random
import sys
import threading
import time
from pathlib import Path
import requests
from ultralytics.utils import (
ENVIRONMENT,
LOGGER,
ONLINE,
RANK,
SETTINGS,
TESTS_RUNNING,
TQDM,
TryExcept,
__version__,
colorstr,
get_git_origin_url,
is_colab,
is_git_dir,
is_pip_package,
)
from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES
HUB_API_ROOT = os.environ.get("ULTRALYTICS_HUB_API", "https://api.ultralytics.com")
HUB_WEB_ROOT = os.environ.get("ULTRALYTICS_HUB_WEB", "https://hub.ultralytics.com")
PREFIX = colorstr("Ultralytics HUB: ")
HELP_MSG = "If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance."
def request_with_credentials(url: str) -> any:
"""
Make an AJAX request with cookies attached in a Google Colab environment.
Args:
url (str): The URL to make the request to.
Returns:
(any): The response data from the AJAX request.
Raises:
OSError: If the function is not run in a Google Colab environment.
"""
if not is_colab():
raise OSError("request_with_credentials() must run in a Colab environment")
from google.colab import output # noqa
from IPython import display # noqa
display.display(
display.Javascript(
"""
window._hub_tmp = new Promise((resolve, reject) => {
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
fetch("%s", {
method: 'POST',
credentials: 'include'
})
.then((response) => resolve(response.json()))
.then((json) => {
clearTimeout(timeout);
}).catch((err) => {
clearTimeout(timeout);
reject(err);
});
});
"""
% url
)
)
return output.eval_js("_hub_tmp")
def requests_with_progress(method, url, **kwargs):
"""
Make an HTTP request using the specified method and URL, with an optional progress bar.
Args:
method (str): The HTTP method to use (e.g. 'GET', 'POST').
url (str): The URL to send the request to.
**kwargs (dict): Additional keyword arguments to pass to the underlying `requests.request` function.
Returns:
(requests.Response): The response object from the HTTP request.
Note:
- If 'progress' is set to True, the progress bar will display the download progress for responses with a known
content length.
        - If 'progress' is a number, the progress bar will display assuming content length = progress.
"""
progress = kwargs.pop("progress", False)
if not progress:
return requests.request(method, url, **kwargs)
response = requests.request(method, url, stream=True, **kwargs)
total = int(response.headers.get("content-length", 0) if isinstance(progress, bool) else progress) # total size
try:
pbar = TQDM(total=total, unit="B", unit_scale=True, unit_divisor=1024)
for data in response.iter_content(chunk_size=1024):
pbar.update(len(data))
pbar.close()
except requests.exceptions.ChunkedEncodingError: # avoid 'Connection broken: IncompleteRead' warnings
response.close()
return response
def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, progress=False, **kwargs):
"""
Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.
Args:
method (str): The HTTP method to use for the request. Choices are 'post' and 'get'.
url (str): The URL to make the request to.
retry (int, optional): Number of retries to attempt before giving up. Default is 3.
timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30.
thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True.
code (int, optional): An identifier for the request, used for logging purposes. Default is -1.
verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True.
progress (bool, optional): Whether to show a progress bar during the request. Default is False.
**kwargs (dict): Keyword arguments to be passed to the requests function specified in method.
Returns:
(requests.Response): The HTTP response object. If the request is executed in a separate thread, returns None.
"""
retry_codes = (408, 500) # retry only these codes
@TryExcept(verbose=verbose)
def func(func_method, func_url, **func_kwargs):
"""Make HTTP requests with retries and timeouts, with optional progress tracking."""
r = None # response
t0 = time.time() # initial time for timer
for i in range(retry + 1):
if (time.time() - t0) > timeout:
break
r = requests_with_progress(func_method, func_url, **func_kwargs) # i.e. get(url, data, json, files)
if r.status_code < 300: # return codes in the 2xx range are generally considered "good" or "successful"
break
try:
m = r.json().get("message", "No JSON message.")
except AttributeError:
m = "Unable to read JSON."
if i == 0:
if r.status_code in retry_codes:
m += f" Retrying {retry}x for {timeout}s." if retry else ""
elif r.status_code == 429: # rate limit
h = r.headers # response headers
m = (
f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). "
f"Please retry after {h['Retry-After']}s."
)
if verbose:
LOGGER.warning(f"{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})")
if r.status_code not in retry_codes:
return r
            time.sleep(2**i)  # exponential backoff
return r
args = method, url
kwargs["progress"] = progress
if thread:
threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
else:
return func(*args, **kwargs)
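# --- Illustrative sketch (not part of the original module) ---
# The retry loop above sleeps 2**i seconds between attempts, so with the
# default retry=3 the waits are 1s, 2s, 4s (bounded overall by `timeout`).
# Hypothetical helper to preview the schedule:
def _demo_backoff_schedule(retry=3):
    """Return the exponential backoff waits used between retries (sketch)."""
    return [2**i for i in range(retry)]  # [1, 2, 4]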
class Events:
"""
A class for collecting anonymous event analytics. Event analytics are enabled when sync=True in settings and
disabled when sync=False. Run 'yolo settings' to see and update settings YAML file.
Attributes:
url (str): The URL to send anonymous events.
rate_limit (float): The rate limit in seconds for sending events.
metadata (dict): A dictionary containing metadata about the environment.
enabled (bool): A flag to enable or disable Events based on certain conditions.
"""
url = "https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw"
def __init__(self):
"""Initializes the Events object with default values for events, rate_limit, and metadata."""
self.events = [] # events list
self.rate_limit = 60.0 # rate limit (seconds)
self.t = 0.0 # rate limit timer (seconds)
self.metadata = {
"cli": Path(sys.argv[0]).name == "yolo",
"install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
"python": ".".join(platform.python_version_tuple()[:2]), # i.e. 3.10
"version": __version__,
"env": ENVIRONMENT,
"session_id": round(random.random() * 1e15),
"engagement_time_msec": 1000,
}
self.enabled = (
SETTINGS["sync"]
and RANK in (-1, 0)
and not TESTS_RUNNING
and ONLINE
and (is_pip_package() or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
)
def __call__(self, cfg):
"""
Attempts to add a new event to the events list and send events if the rate limit is reached.
Args:
cfg (IterableSimpleNamespace): The configuration object containing mode and task information.
"""
if not self.enabled:
# Events disabled, do nothing
return
# Attempt to add to events
if len(self.events) < 25: # Events list limited to 25 events (drop any events past this)
params = {
**self.metadata,
"task": cfg.task,
"model": cfg.model if cfg.model in GITHUB_ASSETS_NAMES else "custom",
}
if cfg.mode == "export":
params["format"] = cfg.format
self.events.append({"name": cfg.mode, "params": params})
# Check rate limit
t = time.time()
if (t - self.t) < self.rate_limit:
# Time is under rate limiter, wait to send
return
# Time is over rate limiter, send now
data = {"client_id": SETTINGS["uuid"], "events": self.events} # SHA-256 anonymized UUID hash and events list
# POST equivalent to requests.post(self.url, json=data)
smart_request("post", self.url, json=data, retry=0, verbose=False)
# Reset events and rate limit timer
self.events = []
self.t = t
# Run below code on hub/utils init -------------------------------------------------------------------------------------
events = Events()

# ==================== ultralytics/hub/__init__.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
import requests
from ultralytics.data.utils import HUBDatasetStats
from ultralytics.hub.auth import Auth
from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX
from ultralytics.utils import LOGGER, SETTINGS, checks
def login(api_key: str = None, save=True) -> bool:
"""
Log in to the Ultralytics HUB API using the provided API key.
The session is not stored; a new session is created when needed using the saved SETTINGS or the HUB_API_KEY
environment variable if successfully authenticated.
Args:
api_key (str, optional): API key to use for authentication.
If not provided, it will be retrieved from SETTINGS or HUB_API_KEY environment variable.
save (bool, optional): Whether to save the API key to SETTINGS if authentication is successful.
Returns:
(bool): True if authentication is successful, False otherwise.
"""
checks.check_requirements("hub-sdk>=0.0.2")
from hub_sdk import HUBClient
api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys" # set the redirect URL
saved_key = SETTINGS.get("api_key")
active_key = api_key or saved_key
credentials = {"api_key": active_key} if active_key and active_key != "" else None # set credentials
client = HUBClient(credentials) # initialize HUBClient
if client.authenticated:
# Successfully authenticated with HUB
if save and client.api_key != saved_key:
SETTINGS.update({"api_key": client.api_key}) # update settings with valid API key
# Set message based on whether key was provided or retrieved from settings
log_message = (
"New authentication successful ✅" if client.api_key == api_key or not credentials else "Authenticated ✅"
)
LOGGER.info(f"{PREFIX}{log_message}")
return True
else:
# Failed to authenticate with HUB
LOGGER.info(f"{PREFIX}Retrieve API key from {api_key_url}")
return False
def logout():
"""
Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo hub login'.
Example:
```python
from ultralytics import hub
hub.logout()
```
"""
SETTINGS["api_key"] = ""
SETTINGS.save()
LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo hub login'.")
def reset_model(model_id=""):
"""Reset a trained model to an untrained state."""
r = requests.post(f"{HUB_API_ROOT}/model-reset", json={"modelId": model_id}, headers={"x-api-key": Auth().api_key})
if r.status_code == 200:
LOGGER.info(f"{PREFIX}Model reset successfully")
return
LOGGER.warning(f"{PREFIX}Model reset failure {r.status_code} {r.reason}")
def export_fmts_hub():
"""Returns a list of HUB-supported export formats."""
from ultralytics.engine.exporter import export_formats
return list(export_formats()["Argument"][1:]) + ["ultralytics_tflite", "ultralytics_coreml"]
def export_model(model_id="", format="torchscript"):
"""Export a model to all formats."""
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
r = requests.post(
f"{HUB_API_ROOT}/v1/models/{model_id}/export", json={"format": format}, headers={"x-api-key": Auth().api_key}
)
assert r.status_code == 200, f"{PREFIX}{format} export failure {r.status_code} {r.reason}"
LOGGER.info(f"{PREFIX}{format} export started ✅")
def get_export(model_id="", format="torchscript"):
"""Get an exported model dictionary with download URL."""
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
r = requests.post(
f"{HUB_API_ROOT}/get-export",
json={"apiKey": Auth().api_key, "modelId": model_id, "format": format},
headers={"x-api-key": Auth().api_key},
)
assert r.status_code == 200, f"{PREFIX}{format} get_export failure {r.status_code} {r.reason}"
return r.json()
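# --- Illustrative sketch (not part of the original module) ---
# Typical export flow with the two functions above, assuming a valid API key is
# already configured; `model_id` must be a real HUB model id and the helper
# name is hypothetical.
def _demo_export_flow(model_id: str, fmt: str = "onnx"):
    """Start a HUB export and fetch its download metadata (sketch)."""
    export_model(model_id=model_id, format=fmt)  # starts the export job
    return get_export(model_id=model_id, format=fmt)  # dict with download URL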
def check_dataset(path="", task="detect"):
"""
    Error-check a HUB dataset Zip file (with data.yaml inside) before it is uploaded to the HUB. Usage examples are
    given below.
Args:
path (str, optional): Path to data.zip (with data.yaml inside data.zip). Defaults to ''.
task (str, optional): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Defaults to 'detect'.
Example:
```python
from ultralytics.hub import check_dataset
check_dataset('path/to/coco8.zip', task='detect') # detect dataset
check_dataset('path/to/coco8-seg.zip', task='segment') # segment dataset
check_dataset('path/to/coco8-pose.zip', task='pose') # pose dataset
```
"""
HUBDatasetStats(path=path, task=task).get_json()
LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.")

# ==================== ultralytics/hub/session.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
import threading
import time
from http import HTTPStatus
from pathlib import Path
import requests
from ultralytics.hub.utils import HUB_WEB_ROOT, HELP_MSG, PREFIX, TQDM
from ultralytics.utils import LOGGER, SETTINGS, __version__, checks, emojis, is_colab
from ultralytics.utils.errors import HUBModelError
AGENT_NAME = f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local"
class HUBTrainingSession:
"""
HUB training session for Ultralytics HUB YOLO models. Handles model initialization, heartbeats, and checkpointing.
Attributes:
agent_id (str): Identifier for the instance communicating with the server.
model_id (str): Identifier for the YOLO model being trained.
model_url (str): URL for the model in Ultralytics HUB.
api_url (str): API URL for the model in Ultralytics HUB.
auth_header (dict): Authentication header for the Ultralytics HUB API requests.
rate_limits (dict): Rate limits for different API calls (in seconds).
timers (dict): Timers for rate limiting.
metrics_queue (dict): Queue for the model's metrics.
model (dict): Model data fetched from Ultralytics HUB.
alive (bool): Indicates if the heartbeat loop is active.
"""
def __init__(self, identifier):
"""
Initialize the HUBTrainingSession with the provided model identifier.
Args:
identifier (str): Model identifier used to initialize the HUB training session.
It can be a URL string or a model key with specific format.
Raises:
ValueError: If the provided model identifier is invalid.
ConnectionError: If connecting with global API key is not supported.
ModuleNotFoundError: If hub-sdk package is not installed.
"""
from hub_sdk import HUBClient
self.rate_limits = {
"metrics": 3.0,
"ckpt": 900.0,
"heartbeat": 300.0,
} # rate limits (seconds)
self.metrics_queue = {} # holds metrics for each epoch until upload
self.timers = {} # holds timers in ultralytics/utils/callbacks/hub.py
# Parse input
api_key, model_id, self.filename = self._parse_identifier(identifier)
# Get credentials
active_key = api_key or SETTINGS.get("api_key")
credentials = {"api_key": active_key} if active_key else None # set credentials
# Initialize client
self.client = HUBClient(credentials)
if model_id:
self.load_model(model_id) # load existing model
else:
self.model = self.client.model() # load empty model
def load_model(self, model_id):
"""Loads an existing model from Ultralytics HUB using the provided model identifier."""
self.model = self.client.model(model_id)
if not self.model.data: # then model does not exist
            raise ValueError(emojis("❌ The specified HUB model does not exist"))  # TODO: improve error handling
self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"
self._set_train_args()
# Start heartbeats for HUB to monitor agent
self.model.start_heartbeat(self.rate_limits["heartbeat"])
LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀")
def create_model(self, model_args):
"""Initializes a HUB training session with the specified model identifier."""
payload = {
"config": {
"batchSize": model_args.get("batch", -1),
"epochs": model_args.get("epochs", 300),
"imageSize": model_args.get("imgsz", 640),
"patience": model_args.get("patience", 100),
"device": model_args.get("device", ""),
"cache": model_args.get("cache", "ram"),
},
"dataset": {"name": model_args.get("data")},
"lineage": {
"architecture": {
"name": self.filename.replace(".pt", "").replace(".yaml", ""),
},
"parent": {},
},
"meta": {"name": self.filename},
}
if self.filename.endswith(".pt"):
payload["lineage"]["parent"]["name"] = self.filename
self.model.create_model(payload)
# Model could not be created
# TODO: improve error handling
if not self.model.id:
return
self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"
# Start heartbeats for HUB to monitor agent
self.model.start_heartbeat(self.rate_limits["heartbeat"])
LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀")
def _parse_identifier(self, identifier):
"""
Parses the given identifier to determine the type of identifier and extract relevant components.
The method supports different identifier formats:
- A HUB URL, which starts with HUB_WEB_ROOT followed by '/models/'
- An identifier containing an API key and a model ID separated by an underscore
- An identifier that is solely a model ID of a fixed length
- A local filename that ends with '.pt' or '.yaml'
Args:
identifier (str): The identifier string to be parsed.
Returns:
(tuple): A tuple containing the API key, model ID, and filename as applicable.
Raises:
HUBModelError: If the identifier format is not recognized.
"""
# Initialize variables
api_key, model_id, filename = None, None, None
# Check if identifier is a HUB URL
if identifier.startswith(f"{HUB_WEB_ROOT}/models/"):
# Extract the model_id after the HUB_WEB_ROOT URL
model_id = identifier.split(f"{HUB_WEB_ROOT}/models/")[-1]
else:
# Split the identifier based on underscores only if it's not a HUB URL
parts = identifier.split("_")
# Check if identifier is in the format of API key and model ID
if len(parts) == 2 and len(parts[0]) == 42 and len(parts[1]) == 20:
api_key, model_id = parts
# Check if identifier is a single model ID
elif len(parts) == 1 and len(parts[0]) == 20:
model_id = parts[0]
# Check if identifier is a local filename
elif identifier.endswith(".pt") or identifier.endswith(".yaml"):
filename = identifier
else:
raise HUBModelError(
f"model='{identifier}' could not be parsed. Check format is correct. "
f"Supported formats are Ultralytics HUB URL, apiKey_modelId, modelId, local pt or yaml file."
)
return api_key, model_id, filename
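    # --- Illustrative sketch (not part of the original class) ---
    # The accepted identifier shapes parsed above, with hypothetical values
    # (a real API key is 42 characters and a model id 20 characters):
    @staticmethod
    def _demo_identifiers():
        """Return example identifiers in each format accepted by _parse_identifier (sketch)."""
        return [
            f"{HUB_WEB_ROOT}/models/{'m' * 20}",  # HUB URL -> model_id
            f"{'k' * 42}_{'m' * 20}",  # apiKey_modelId -> api_key + model_id
            "m" * 20,  # bare 20-char model id
            "yolov8n.pt",  # local weights file -> filename
        ]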
def _set_train_args(self, **kwargs):
"""Initializes training arguments and creates a model entry on the Ultralytics HUB."""
if self.model.is_trained():
# Model is already trained
raise ValueError(emojis(f"Model is already trained and uploaded to {self.model_url} 🚀"))
if self.model.is_resumable():
# Model has saved weights
self.train_args = {"data": self.model.get_dataset_url(), "resume": True}
self.model_file = self.model.get_weights_url("last")
else:
# Model has no saved weights
def get_train_args(config):
"""Parses an identifier to extract API key, model ID, and filename if applicable."""
return {
"batch": config["batchSize"],
"epochs": config["epochs"],
"imgsz": config["imageSize"],
"patience": config["patience"],
"device": config["device"],
"cache": config["cache"],
"data": self.model.get_dataset_url(),
}
self.train_args = get_train_args(self.model.data.get("config"))
# Set the model file as either a *.pt or *.yaml file
self.model_file = (
self.model.get_weights_url("parent") if self.model.is_pretrained() else self.model.get_architecture()
)
if not self.train_args.get("data"):
raise ValueError("Dataset may still be processing. Please wait a minute and try again.") # RF fix
self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False) # YOLOv5->YOLOv5u
self.model_id = self.model.id
def request_queue(
self,
request_func,
retry=3,
timeout=30,
thread=True,
verbose=True,
progress_total=None,
*args,
**kwargs,
    ):
        """Attempts `request_func` with retries, timeout handling, optional threading, and progress display."""
def retry_request():
"""Attempts to call `request_func` with retries, timeout, and optional threading."""
t0 = time.time() # Record the start time for the timeout
for i in range(retry + 1):
if (time.time() - t0) > timeout:
LOGGER.warning(f"{PREFIX}Timeout for request reached. {HELP_MSG}")
break # Timeout reached, exit loop
response = request_func(*args, **kwargs)
if response is None:
LOGGER.warning(f"{PREFIX}Received no response from the request. {HELP_MSG}")
time.sleep(2**i) # Exponential backoff before retrying
continue # Skip further processing and retry
if progress_total:
self._show_upload_progress(progress_total, response)
if HTTPStatus.OK <= response.status_code < HTTPStatus.MULTIPLE_CHOICES:
return response # Success, no need to retry
if i == 0:
# Initial attempt, check status code and provide messages
message = self._get_failure_message(response, retry, timeout)
if verbose:
LOGGER.warning(f"{PREFIX}{message} {HELP_MSG} ({response.status_code})")
if not self._should_retry(response.status_code):
LOGGER.warning(f"{PREFIX}Request failed. {HELP_MSG} ({response.status_code}")
break # Not an error that should be retried, exit loop
time.sleep(2**i) # Exponential backoff for retries
return response
if thread:
# Start a new thread to run the retry_request function
threading.Thread(target=retry_request, daemon=True).start()
else:
# If running in the main thread, call retry_request directly
return retry_request()
def _should_retry(self, status_code):
"""Determines if a request should be retried based on the HTTP status code."""
retry_codes = {
HTTPStatus.REQUEST_TIMEOUT,
HTTPStatus.BAD_GATEWAY,
HTTPStatus.GATEWAY_TIMEOUT,
}
return status_code in retry_codes
def _get_failure_message(self, response: requests.Response, retry: int, timeout: int):
"""
Generate a retry message based on the response status code.
Args:
response: The HTTP response object.
retry: The number of retry attempts allowed.
timeout: The maximum timeout duration.
Returns:
(str): The retry message.
"""
if self._should_retry(response.status_code):
return f"Retrying {retry}x for {timeout}s." if retry else ""
elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS: # rate limit
headers = response.headers
return (
f"Rate limit reached ({headers['X-RateLimit-Remaining']}/{headers['X-RateLimit-Limit']}). "
f"Please retry after {headers['Retry-After']}s."
)
else:
try:
return response.json().get("message", "No JSON message.")
except AttributeError:
return "Unable to read JSON."
def upload_metrics(self):
"""Upload model metrics to Ultralytics HUB."""
return self.request_queue(self.model.upload_metrics, metrics=self.metrics_queue.copy(), thread=True)
def upload_model(
self,
epoch: int,
weights: str,
is_best: bool = False,
map: float = 0.0,
final: bool = False,
) -> None:
"""
Upload a model checkpoint to Ultralytics HUB.
Args:
epoch (int): The current training epoch.
weights (str): Path to the model weights file.
is_best (bool): Indicates if the current model is the best one so far.
map (float): Mean average precision of the model.
final (bool): Indicates if the model is the final model after training.
"""
if Path(weights).is_file():
progress_total = Path(weights).stat().st_size if final else None # Only show progress if final
self.request_queue(
self.model.upload_model,
epoch=epoch,
weights=weights,
is_best=is_best,
map=map,
final=final,
retry=10,
timeout=3600,
thread=not final,
progress_total=progress_total,
)
else:
LOGGER.warning(f"{PREFIX}WARNING ⚠� Model upload issue. Missing model {weights}.")
def _show_upload_progress(self, content_length: int, response: requests.Response) -> None:
"""
Display a progress bar to track the upload progress of a file download.
Args:
content_length (int): The total size of the content to be downloaded in bytes.
response (requests.Response): The response object from the file download request.
Returns:
None
"""
with TQDM(total=content_length, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response.iter_content(chunk_size=1024):
pbar.update(len(data))

# ==================== ultralytics/solutions/distance_calculation.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
import math
import cv2
from ultralytics.utils.checks import check_imshow
from ultralytics.utils.plotting import Annotator, colors
class DistanceCalculation:
"""A class to calculate distance between two objects in real-time video stream based on their tracks."""
def __init__(self):
"""Initializes the distance calculation class with default values for Visual, Image, track and distance
parameters.
"""
# Visual & im0 information
self.im0 = None
self.annotator = None
self.view_img = False
self.line_color = (255, 255, 0)
self.centroid_color = (255, 0, 255)
# Predict/track information
self.clss = None
self.names = None
self.boxes = None
self.line_thickness = 2
self.trk_ids = None
# Distance calculation information
self.centroids = []
self.pixel_per_meter = 10
# Mouse event
self.left_mouse_count = 0
self.selected_boxes = {}
# Check if environment support imshow
self.env_check = check_imshow(warn=True)
def set_args(
self,
names,
pixels_per_meter=10,
view_img=False,
line_thickness=2,
line_color=(255, 255, 0),
centroid_color=(255, 0, 255),
):
"""
Configures the distance calculation and display parameters.
Args:
names (dict): object detection classes names
pixels_per_meter (int): Number of pixels in meter
view_img (bool): Flag indicating frame display
line_thickness (int): Line thickness for bounding boxes.
line_color (RGB): color of centroids line
centroid_color (RGB): colors of bbox centroids
"""
self.names = names
self.pixel_per_meter = pixels_per_meter
self.view_img = view_img
self.line_thickness = line_thickness
self.line_color = line_color
self.centroid_color = centroid_color
def mouse_event_for_distance(self, event, x, y, flags, param):
"""
        Handles mouse events to select the two bounding boxes used for distance calculation in a real-time video stream.
Args:
event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
x (int): The x-coordinate of the mouse pointer.
y (int): The y-coordinate of the mouse pointer.
flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY,
cv2.EVENT_FLAG_SHIFTKEY, etc.).
param (dict): Additional parameters you may want to pass to the function.
"""
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
if self.left_mouse_count <= 2:
for box, track_id in zip(self.boxes, self.trk_ids):
if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
                        self.selected_boxes[track_id] = box
if event == cv2.EVENT_RBUTTONDOWN:
self.selected_boxes = {}
self.left_mouse_count = 0
def extract_tracks(self, tracks):
"""
Extracts results from the provided data.
Args:
tracks (list): List of tracks obtained from the object tracking process.
"""
self.boxes = tracks[0].boxes.xyxy.cpu()
self.clss = tracks[0].boxes.cls.cpu().tolist()
self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
def calculate_centroid(self, box):
"""
        Calculate the centroid of a bounding box.
Args:
box (list): Bounding box data
"""
return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)
def calculate_distance(self, centroid1, centroid2):
"""
Calculate distance between two centroids.
Args:
            centroid1 (point): Centroid of the first bounding box
            centroid2 (point): Centroid of the second bounding box
"""
pixel_distance = math.sqrt((centroid1[0] - centroid2[0]) ** 2 + (centroid1[1] - centroid2[1]) ** 2)
return pixel_distance / self.pixel_per_meter, (pixel_distance / self.pixel_per_meter) * 1000
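    # --- Illustrative sketch (not part of the original class) ---
    # With the default pixel_per_meter=10, two centroids 50 px apart are
    # reported as 5 m (5000 mm). Hypothetical standalone check:
    @staticmethod
    def _demo_distance():
        """Convert a 50 px centroid gap to meters and millimeters (sketch)."""
        c1, c2, ppm = (100, 100), (130, 140), 10
        pixels = math.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)  # 50.0
        return pixels / ppm, (pixels / ppm) * 1000  # (5.0, 5000.0)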
def start_process(self, im0, tracks):
"""
Calculate distance between two bounding boxes based on tracking data.
Args:
im0 (nd array): Image
tracks (list): List of tracks obtained from the object tracking process.
"""
self.im0 = im0
if tracks[0].boxes.id is None:
if self.view_img:
self.display_frames()
return
self.extract_tracks(tracks)
self.annotator = Annotator(self.im0, line_width=2)
for box, cls, track_id in zip(self.boxes, self.clss, self.trk_ids):
self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
if len(self.selected_boxes) == 2:
for trk_id, _ in self.selected_boxes.items():
if trk_id == track_id:
self.selected_boxes[track_id] = box
if len(self.selected_boxes) == 2:
for trk_id, box in self.selected_boxes.items():
centroid = self.calculate_centroid(self.selected_boxes[trk_id])
self.centroids.append(centroid)
distance_m, distance_mm = self.calculate_distance(self.centroids[0], self.centroids[1])
self.annotator.plot_distance_and_line(
distance_m, distance_mm, self.centroids, self.line_color, self.centroid_color
)
self.centroids = []
if self.view_img and self.env_check:
self.display_frames()
return im0
def display_frames(self):
"""Display frame."""
cv2.namedWindow("Ultralytics Distance Estimation")
cv2.setMouseCallback("Ultralytics Distance Estimation", self.mouse_event_for_distance)
cv2.imshow("Ultralytics Distance Estimation", self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
if __name__ == "__main__":
DistanceCalculation()

# ==================== ultralytics/solutions/speed_estimation.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import defaultdict
from time import time
import cv2
import numpy as np
from ultralytics.utils.checks import check_imshow
from ultralytics.utils.plotting import Annotator, colors
class SpeedEstimator:
"""A class to estimation speed of objects in real-time video stream based on their tracks."""
def __init__(self):
"""Initializes the speed-estimator class with default values for Visual, Image, track and speed parameters."""
# Visual & im0 information
self.im0 = None
self.annotator = None
self.view_img = False
# Region information
self.reg_pts = [(20, 400), (1260, 400)]
self.region_thickness = 3
# Predict/track information
self.clss = None
self.names = None
self.boxes = None
self.trk_ids = None
self.trk_pts = None
self.line_thickness = 2
self.trk_history = defaultdict(list)
# Speed estimator information
self.current_time = 0
self.dist_data = {}
self.trk_idslist = []
self.spdl_dist_thresh = 10
self.trk_previous_times = {}
self.trk_previous_points = {}
# Check if environment support imshow
self.env_check = check_imshow(warn=True)
def set_args(
self,
reg_pts,
names,
view_img=False,
line_thickness=2,
region_thickness=5,
spdl_dist_thresh=10,
):
"""
Configures the speed estimation and display parameters.
Args:
reg_pts (list): Initial list of points defining the speed calculation region.
names (dict): object detection classes names
view_img (bool): Flag indicating frame display
line_thickness (int): Line thickness for bounding boxes.
region_thickness (int): Speed estimation region thickness
spdl_dist_thresh (int): Euclidean distance threshold for speed line
"""
if reg_pts is None:
print("Region points not provided, using default values")
else:
self.reg_pts = reg_pts
self.names = names
self.view_img = view_img
self.line_thickness = line_thickness
self.region_thickness = region_thickness
self.spdl_dist_thresh = spdl_dist_thresh
def extract_tracks(self, tracks):
"""
Extracts results from the provided data.
Args:
tracks (list): List of tracks obtained from the object tracking process.
"""
self.boxes = tracks[0].boxes.xyxy.cpu()
self.clss = tracks[0].boxes.cls.cpu().tolist()
self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
def store_track_info(self, track_id, box):
"""
Store track data.
Args:
track_id (int): object track id.
box (list): object bounding box data
"""
track = self.trk_history[track_id]
bbox_center = (float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))
track.append(bbox_center)
if len(track) > 30:
track.pop(0)
self.trk_pts = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
return track
def plot_box_and_track(self, track_id, box, cls, track):
"""
Plot track and bounding box.
Args:
track_id (int): object track id.
box (list): object bounding box data
cls (str): object class name
track (list): tracking history for tracks path drawing
"""
speed_label = f"{int(self.dist_data[track_id])}km/ph" if track_id in self.dist_data else self.names[int(cls)]
bbox_color = colors(int(track_id)) if track_id in self.dist_data else (255, 0, 255)
self.annotator.box_label(box, speed_label, bbox_color)
cv2.polylines(self.im0, [self.trk_pts], isClosed=False, color=(0, 255, 0), thickness=1)
cv2.circle(self.im0, (int(track[-1][0]), int(track[-1][1])), 5, bbox_color, -1)
def calculate_speed(self, trk_id, track):
"""
Calculation of object speed.
Args:
trk_id (int): object track id.
track (list): tracking history for tracks path drawing
"""
if not self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
return
if self.reg_pts[1][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[1][1] + self.spdl_dist_thresh:
direction = "known"
elif self.reg_pts[0][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[0][1] + self.spdl_dist_thresh:
direction = "known"
else:
direction = "unknown"
if self.trk_previous_times[trk_id] != 0 and direction != "unknown" and trk_id not in self.trk_idslist:
self.trk_idslist.append(trk_id)
time_difference = time() - self.trk_previous_times[trk_id]
if time_difference > 0:
dist_difference = np.abs(track[-1][1] - self.trk_previous_points[trk_id][1])
speed = dist_difference / time_difference
self.dist_data[trk_id] = speed
self.trk_previous_times[trk_id] = time()
self.trk_previous_points[trk_id] = track[-1]
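    # --- Illustrative sketch (not part of the original class) ---
    # The stored speed above is vertical pixel displacement divided by elapsed
    # seconds, so the km/h label assumes a calibrated scene. Hypothetical check:
    @staticmethod
    def _demo_speed():
        """Compute displacement-over-time speed for sample values (sketch)."""
        prev_y, cur_y, dt = 400.0, 430.0, 0.5  # pixels and seconds
        return abs(cur_y - prev_y) / dt  # 60.0 pixels/second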
def estimate_speed(self, im0, tracks, region_color=(255, 0, 0)):
"""
        Estimate object speed based on tracking data.
Args:
im0 (nd array): Image
tracks (list): List of tracks obtained from the object tracking process.
region_color (tuple): Color to use when drawing regions.
"""
self.im0 = im0
if tracks[0].boxes.id is None:
if self.view_img and self.env_check:
self.display_frames()
return im0
self.extract_tracks(tracks)
self.annotator = Annotator(self.im0, line_width=2)
self.annotator.draw_region(reg_pts=self.reg_pts, color=region_color, thickness=self.region_thickness)
for box, trk_id, cls in zip(self.boxes, self.trk_ids, self.clss):
track = self.store_track_info(trk_id, box)
if trk_id not in self.trk_previous_times:
self.trk_previous_times[trk_id] = 0
self.plot_box_and_track(trk_id, box, cls, track)
self.calculate_speed(trk_id, track)
if self.view_img and self.env_check:
self.display_frames()
return im0
def display_frames(self):
"""Display frame."""
cv2.imshow("Ultralytics Speed Estimation", self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
if __name__ == "__main__":
SpeedEstimator()

# ==================== ultralytics/solutions/object_counter.py ====================
# Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import defaultdict
import cv2
from ultralytics.utils.checks import check_imshow, check_requirements
from ultralytics.utils.plotting import Annotator, colors
check_requirements("shapely>=2.0.0")
from shapely.geometry import LineString, Point, Polygon
class ObjectCounter:
"""A class to manage the counting of objects in a real-time video stream based on their tracks."""
def __init__(self):
"""Initializes the Counter with default values for various tracking and counting parameters."""
# Mouse events
self.is_drawing = False
self.selected_point = None
# Region & Line Information
self.reg_pts = [(20, 400), (1260, 400)]
self.line_dist_thresh = 15
self.counting_region = None
self.region_color = (255, 0, 255)
self.region_thickness = 5
# Image and annotation Information
self.im0 = None
self.tf = None
self.view_img = False
self.view_in_counts = True
self.view_out_counts = True
self.names = None # Classes names
self.annotator = None # Annotator
# Object counting Information
self.in_counts = 0
self.out_counts = 0
self.counting_list = []
self.count_txt_thickness = 0
self.count_txt_color = (0, 0, 0)
self.count_color = (255, 255, 255)
# Tracks info
self.track_history = defaultdict(list)
self.track_thickness = 2
self.draw_tracks = False
self.track_color = (0, 255, 0)
# Check if environment support imshow
self.env_check = check_imshow(warn=True)
def set_args(
self,
classes_names,
reg_pts,
count_reg_color=(255, 0, 255),
line_thickness=2,
track_thickness=2,
view_img=False,
view_in_counts=True,
view_out_counts=True,
draw_tracks=False,
count_txt_thickness=2,
count_txt_color=(0, 0, 0),
count_color=(255, 255, 255),
track_color=(0, 255, 0),
region_thickness=5,
line_dist_thresh=15,
):
"""
Configures the Counter's image, bounding box line thickness, and counting region points.
Args:
line_thickness (int): Line thickness for bounding boxes.
view_img (bool): Flag to control whether to display the video stream.
view_in_counts (bool): Flag to control whether to display the incounts on video stream.
view_out_counts (bool): Flag to control whether to display the outcounts on video stream.
reg_pts (list): Initial list of points defining the counting region.
classes_names (dict): Classes names
track_thickness (int): Track thickness
            draw_tracks (bool): Flag to draw object tracks
count_txt_thickness (int): Text thickness for object counting display
count_txt_color (RGB color): count text color value
count_color (RGB color): count text background color value
count_reg_color (RGB color): Color of object counting region
track_color (RGB color): color for tracks
region_thickness (int): Object counting Region thickness
line_dist_thresh (int): Euclidean Distance threshold for line counter
"""
self.tf = line_thickness
self.view_img = view_img
self.view_in_counts = view_in_counts
self.view_out_counts = view_out_counts
self.track_thickness = track_thickness
self.draw_tracks = draw_tracks
# Region and line selection
if len(reg_pts) == 2:
print("Line Counter Initiated.")
self.reg_pts = reg_pts
self.counting_region = LineString(self.reg_pts)
elif len(reg_pts) == 4:
print("Region Counter Initiated.")
self.reg_pts = reg_pts
self.counting_region = Polygon(self.reg_pts)
else:
print("Invalid Region points provided, region_points can be 2 or 4")
print("Using Line Counter Now")
self.counting_region = LineString(self.reg_pts)
self.names = classes_names
self.track_color = track_color
self.count_txt_thickness = count_txt_thickness
self.count_txt_color = count_txt_color
self.count_color = count_color
self.region_color = count_reg_color
self.region_thickness = region_thickness
self.line_dist_thresh = line_dist_thresh
def mouse_event_for_region(self, event, x, y, flags, params):
"""
        Enables moving the counting region with mouse events in a real-time video stream.
Args:
event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
x (int): The x-coordinate of the mouse pointer.
y (int): The y-coordinate of the mouse pointer.
flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY,
cv2.EVENT_FLAG_SHIFTKEY, etc.).
params (dict): Additional parameters you may want to pass to the function.
"""
if event == cv2.EVENT_LBUTTONDOWN:
for i, point in enumerate(self.reg_pts):
if (
isinstance(point, (tuple, list))
and len(point) >= 2
and (abs(x - point[0]) < 10 and abs(y - point[1]) < 10)
):
self.selected_point = i
self.is_drawing = True
break
elif event == cv2.EVENT_MOUSEMOVE:
if self.is_drawing and self.selected_point is not None:
self.reg_pts[self.selected_point] = (x, y)
self.counting_region = Polygon(self.reg_pts)
elif event == cv2.EVENT_LBUTTONUP:
self.is_drawing = False
self.selected_point = None
def extract_and_process_tracks(self, tracks):
"""Extracts and processes tracks for object counting in a video stream."""
boxes = tracks[0].boxes.xyxy.cpu()
clss = tracks[0].boxes.cls.cpu().tolist()
track_ids = tracks[0].boxes.id.int().cpu().tolist()
# Annotator Init and region drawing
self.annotator = Annotator(self.im0, self.tf, self.names)
self.annotator.draw_region(reg_pts=self.reg_pts, color=self.region_color, thickness=self.region_thickness)
# Extract tracks
for box, track_id, cls in zip(boxes, track_ids, clss):
# Draw bounding box
self.annotator.box_label(box, label=f"{track_id}:{self.names[cls]}", color=colors(int(cls), True))
# Draw Tracks
track_line = self.track_history[track_id]
track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2)))
if len(track_line) > 30:
track_line.pop(0)
# Draw track trails
if self.draw_tracks:
self.annotator.draw_centroid_and_tracks(
track_line, color=self.track_color, track_thickness=self.track_thickness
)
prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None
# Count objects
if len(self.reg_pts) == 4:
if (
prev_position is not None
and self.counting_region.contains(Point(track_line[-1]))
and track_id not in self.counting_list
):
self.counting_list.append(track_id)
if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
self.in_counts += 1
else:
self.out_counts += 1
elif len(self.reg_pts) == 2:
if prev_position is not None:
distance = Point(track_line[-1]).distance(self.counting_region)
if distance < self.line_dist_thresh and track_id not in self.counting_list:
self.counting_list.append(track_id)
if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
self.in_counts += 1
else:
self.out_counts += 1
        incount_label = f"In Count : {self.in_counts}"
        outcount_label = f"Out Count : {self.out_counts}"
        # Display counts based on user choice
        if not self.view_in_counts and not self.view_out_counts:
            counts_label = None
elif not self.view_in_counts:
counts_label = outcount_label
elif not self.view_out_counts:
counts_label = incount_label
else:
counts_label = f"{incount_label} {outcount_label}"
if counts_label is not None:
self.annotator.count_labels(
counts=counts_label,
count_txt_size=self.count_txt_thickness,
txt_color=self.count_txt_color,
color=self.count_color,
)
def display_frames(self):
"""Display frame."""
if self.env_check:
cv2.namedWindow("Ultralytics YOLOv8 Object Counter")
            if len(self.reg_pts) == 4:  # only add mouse event if a user-drawn region is used
cv2.setMouseCallback(
"Ultralytics YOLOv8 Object Counter", self.mouse_event_for_region, {"region_points": self.reg_pts}
)
cv2.imshow("Ultralytics YOLOv8 Object Counter", self.im0)
# Break Window
if cv2.waitKey(1) & 0xFF == ord("q"):
return
def start_counting(self, im0, tracks):
"""
Main function to start the object counting process.
Args:
im0 (ndarray): Current frame from the video stream.
tracks (list): List of tracks obtained from the object tracking process.
"""
self.im0 = im0 # store image
if tracks[0].boxes.id is None:
if self.view_img:
self.display_frames()
return im0
self.extract_and_process_tracks(tracks)
if self.view_img:
self.display_frames()
return self.im0
if __name__ == "__main__":
ObjectCounter()
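    # Hedged usage sketch (added for illustration): the weights file, the video path and the
    # chosen region points below are assumptions, not part of the original module. `cv2` is
    # already imported at module level.
    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")  # hypothetical detection weights
    cap = cv2.VideoCapture("path/to/video.mp4")  # hypothetical video source
    counter = ObjectCounter()
    counter.set_args(classes_names=model.names, reg_pts=[(20, 400), (1260, 400)], view_img=True)
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        tracks = model.track(im0, persist=True, show=False)  # track objects in the frame
        im0 = counter.start_counting(im0, tracks)  # draw region, tracks and counts
    cap.release()
    cv2.destroyAllWindows()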
| 10,474 | Python | .py | 227 | 34.317181 | 117 | 0.585146 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,914 | ai_gym.py | arojsubedi_Improved-YOLOv8s/ultralytics/solutions/ai_gym.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import cv2
from ultralytics.utils.checks import check_imshow
from ultralytics.utils.plotting import Annotator
class AIGym:
"""A class to manage the gym steps of people in a real-time video stream based on their poses."""
def __init__(self):
"""Initializes the AIGym with default values for Visual and Image parameters."""
# Image and line thickness
self.im0 = None
self.tf = None
# Keypoints and count information
self.keypoints = None
self.poseup_angle = None
self.posedown_angle = None
self.threshold = 0.001
# Store stage, count and angle information
self.angle = None
self.count = None
self.stage = None
self.pose_type = "pushup"
self.kpts_to_check = None
# Visual Information
self.view_img = False
self.annotator = None
# Check if environment support imshow
self.env_check = check_imshow(warn=True)
def set_args(
self,
kpts_to_check,
line_thickness=2,
view_img=False,
pose_up_angle=145.0,
pose_down_angle=90.0,
pose_type="pullup",
):
"""
        Configures the AIGym line thickness, pose angles, and image display parameters.

        Args:
            kpts_to_check (list): Indices of the 3 keypoints used for angle estimation and counting.
            line_thickness (int): Line thickness for annotations.
            view_img (bool): Flag to display the processed frame (im0).
            pose_up_angle (float): Angle threshold for the "up" pose position.
            pose_down_angle (float): Angle threshold for the "down" pose position.
            pose_type (str): "pushup", "pullup" or "abworkout".
"""
self.kpts_to_check = kpts_to_check
self.tf = line_thickness
self.view_img = view_img
self.poseup_angle = pose_up_angle
self.posedown_angle = pose_down_angle
self.pose_type = pose_type
def start_counting(self, im0, results, frame_count):
"""
Function used to count the gym steps.
Args:
im0 (ndarray): Current frame from the video stream.
            results (list): Pose estimation results.
            frame_count (int): Current frame count.
"""
self.im0 = im0
if frame_count == 1:
self.count = [0] * len(results[0])
self.angle = [0] * len(results[0])
self.stage = ["-" for _ in results[0]]
self.keypoints = results[0].keypoints.data
        self.annotator = Annotator(im0, line_width=self.tf)  # use the configured line thickness
num_keypoints = len(results[0])
# Resize self.angle, self.count, and self.stage if the number of keypoints has changed
if len(self.angle) != num_keypoints:
self.angle = [0] * num_keypoints
self.count = [0] * num_keypoints
self.stage = ["-" for _ in range(num_keypoints)]
for ind, k in enumerate(reversed(self.keypoints)):
if self.pose_type in ["pushup", "pullup"]:
self.angle[ind] = self.annotator.estimate_pose_angle(
k[int(self.kpts_to_check[0])].cpu(),
k[int(self.kpts_to_check[1])].cpu(),
k[int(self.kpts_to_check[2])].cpu(),
)
self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10)
if self.pose_type == "abworkout":
self.angle[ind] = self.annotator.estimate_pose_angle(
k[int(self.kpts_to_check[0])].cpu(),
k[int(self.kpts_to_check[1])].cpu(),
k[int(self.kpts_to_check[2])].cpu(),
)
self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10)
if self.angle[ind] > self.poseup_angle:
self.stage[ind] = "down"
if self.angle[ind] < self.posedown_angle and self.stage[ind] == "down":
self.stage[ind] = "up"
self.count[ind] += 1
self.annotator.plot_angle_and_count_and_stage(
angle_text=self.angle[ind],
count_text=self.count[ind],
stage_text=self.stage[ind],
center_kpt=k[int(self.kpts_to_check[1])],
line_thickness=self.tf,
)
if self.pose_type == "pushup":
if self.angle[ind] > self.poseup_angle:
self.stage[ind] = "up"
if self.angle[ind] < self.posedown_angle and self.stage[ind] == "up":
self.stage[ind] = "down"
self.count[ind] += 1
self.annotator.plot_angle_and_count_and_stage(
angle_text=self.angle[ind],
count_text=self.count[ind],
stage_text=self.stage[ind],
center_kpt=k[int(self.kpts_to_check[1])],
line_thickness=self.tf,
)
if self.pose_type == "pullup":
if self.angle[ind] > self.poseup_angle:
self.stage[ind] = "down"
if self.angle[ind] < self.posedown_angle and self.stage[ind] == "down":
self.stage[ind] = "up"
self.count[ind] += 1
self.annotator.plot_angle_and_count_and_stage(
angle_text=self.angle[ind],
count_text=self.count[ind],
stage_text=self.stage[ind],
center_kpt=k[int(self.kpts_to_check[1])],
line_thickness=self.tf,
)
self.annotator.kpts(k, shape=(640, 640), radius=1, kpt_line=True)
if self.env_check and self.view_img:
cv2.imshow("Ultralytics YOLOv8 AI GYM", self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
return self.im0
if __name__ == "__main__":
AIGym()
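    # Hedged usage sketch (added for illustration): the pose weights, the video path and the
    # keypoint indices [6, 8, 10] (right shoulder/elbow/wrist in COCO order) are assumptions.
    from ultralytics import YOLO

    model = YOLO("yolov8n-pose.pt")  # hypothetical pose weights
    cap = cv2.VideoCapture("path/to/video.mp4")  # hypothetical video source (cv2 imported above)
    gym = AIGym()
    gym.set_args(kpts_to_check=[6, 8, 10], view_img=True, pose_type="pushup")
    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        frame_count += 1
        results = model.track(im0, verbose=False)  # pose estimation with tracking
        im0 = gym.start_counting(im0, results, frame_count)
    cap.release()
    cv2.destroyAllWindows()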
| 6,029 | Python | .py | 134 | 31.873134 | 114 | 0.540794 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,915 | heatmap.py | arojsubedi_Improved-YOLOv8s/ultralytics/solutions/heatmap.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import defaultdict
import cv2
import numpy as np
from ultralytics.utils.checks import check_imshow, check_requirements
from ultralytics.utils.plotting import Annotator
check_requirements("shapely>=2.0.0")
from shapely.geometry import LineString, Point, Polygon
class Heatmap:
"""A class to draw heatmaps in real-time video stream based on their tracks."""
def __init__(self):
"""Initializes the heatmap class with default values for Visual, Image, track, count and heatmap parameters."""
# Visual information
self.annotator = None
self.view_img = False
self.shape = "circle"
# Image information
self.imw = None
self.imh = None
self.im0 = None
self.view_in_counts = True
self.view_out_counts = True
# Heatmap colormap and heatmap np array
self.colormap = None
self.heatmap = None
self.heatmap_alpha = 0.5
# Predict/track information
self.boxes = None
self.track_ids = None
self.clss = None
self.track_history = defaultdict(list)
# Region & Line Information
self.count_reg_pts = None
self.counting_region = None
self.line_dist_thresh = 15
self.region_thickness = 5
self.region_color = (255, 0, 255)
# Object Counting Information
self.in_counts = 0
self.out_counts = 0
self.counting_list = []
self.count_txt_thickness = 0
self.count_txt_color = (0, 0, 0)
self.count_color = (255, 255, 255)
# Decay factor
self.decay_factor = 0.99
# Check if environment support imshow
self.env_check = check_imshow(warn=True)
def set_args(
self,
imw,
imh,
colormap=cv2.COLORMAP_JET,
heatmap_alpha=0.5,
view_img=False,
view_in_counts=True,
view_out_counts=True,
count_reg_pts=None,
count_txt_thickness=2,
count_txt_color=(0, 0, 0),
count_color=(255, 255, 255),
count_reg_color=(255, 0, 255),
region_thickness=5,
line_dist_thresh=15,
decay_factor=0.99,
shape="circle",
):
"""
Configures the heatmap colormap, width, height and display parameters.
Args:
colormap (cv2.COLORMAP): The colormap to be set.
imw (int): The width of the frame.
imh (int): The height of the frame.
            heatmap_alpha (float): Alpha blending value for the heatmap overlay.
            view_img (bool): Flag to control whether to display the video stream.
            view_in_counts (bool): Flag to control whether to display the in counts on the video stream.
            view_out_counts (bool): Flag to control whether to display the out counts on the video stream.
            count_reg_pts (list): Object counting region points.
            count_txt_thickness (int): Text thickness for the object counting display.
            count_txt_color (RGB color): Count text color value.
            count_color (RGB color): Count text background color value.
            count_reg_color (RGB color): Color of the object counting region.
            region_thickness (int): Thickness of the object counting region.
            line_dist_thresh (int): Euclidean distance threshold for the line counter.
            decay_factor (float): Factor for fading out heatmap intensity after an object has passed.
            shape (str): Heatmap shape; "rect" or "circle" are supported.
"""
self.imw = imw
self.imh = imh
self.heatmap_alpha = heatmap_alpha
self.view_img = view_img
self.view_in_counts = view_in_counts
self.view_out_counts = view_out_counts
self.colormap = colormap
# Region and line selection
if count_reg_pts is not None:
if len(count_reg_pts) == 2:
print("Line Counter Initiated.")
self.count_reg_pts = count_reg_pts
self.counting_region = LineString(count_reg_pts)
elif len(count_reg_pts) == 4:
print("Region Counter Initiated.")
self.count_reg_pts = count_reg_pts
self.counting_region = Polygon(self.count_reg_pts)
            else:
                print("Invalid region or line points provided; 2 or 4 points are supported.")
                print("Using default line counter.")
                self.count_reg_pts = [(20, 400), (1260, 400)]  # default line points
                self.counting_region = LineString(self.count_reg_pts)  # a 2-point Polygon is invalid in shapely
# Heatmap new frame
self.heatmap = np.zeros((int(self.imh), int(self.imw)), dtype=np.float32)
self.count_txt_thickness = count_txt_thickness
self.count_txt_color = count_txt_color
self.count_color = count_color
self.region_color = count_reg_color
self.region_thickness = region_thickness
self.decay_factor = decay_factor
self.line_dist_thresh = line_dist_thresh
self.shape = shape
        # Validate heatmap shape; fall back to circle if an unsupported value is provided
        if self.shape not in ["circle", "rect"]:
            print("Unknown shape value provided, only 'circle' and 'rect' are supported.")
            print("Using circular shape now.")
            self.shape = "circle"
def extract_results(self, tracks):
"""
Extracts results from the provided data.
Args:
tracks (list): List of tracks obtained from the object tracking process.
"""
self.boxes = tracks[0].boxes.xyxy.cpu()
self.clss = tracks[0].boxes.cls.cpu().tolist()
self.track_ids = tracks[0].boxes.id.int().cpu().tolist()
def generate_heatmap(self, im0, tracks):
"""
Generate heatmap based on tracking data.
Args:
            im0 (ndarray): Current frame from the video stream.
tracks (list): List of tracks obtained from the object tracking process.
"""
self.im0 = im0
if tracks[0].boxes.id is None:
self.heatmap = np.zeros((int(self.imh), int(self.imw)), dtype=np.float32)
if self.view_img and self.env_check:
self.display_frames()
return im0
self.heatmap *= self.decay_factor # decay factor
self.extract_results(tracks)
self.annotator = Annotator(self.im0, self.count_txt_thickness, None)
if self.count_reg_pts is not None:
# Draw counting region
if self.view_in_counts or self.view_out_counts:
self.annotator.draw_region(
reg_pts=self.count_reg_pts, color=self.region_color, thickness=self.region_thickness
)
for box, cls, track_id in zip(self.boxes, self.clss, self.track_ids):
if self.shape == "circle":
center = (int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2))
radius = min(int(box[2]) - int(box[0]), int(box[3]) - int(box[1])) // 2
y, x = np.ogrid[0 : self.heatmap.shape[0], 0 : self.heatmap.shape[1]]
mask = (x - center[0]) ** 2 + (y - center[1]) ** 2 <= radius**2
self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += (
2 * mask[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])]
)
else:
self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += 2
# Store tracking hist
track_line = self.track_history[track_id]
track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2)))
if len(track_line) > 30:
track_line.pop(0)
# Count objects
if len(self.count_reg_pts) == 4:
if self.counting_region.contains(Point(track_line[-1])) and track_id not in self.counting_list:
self.counting_list.append(track_id)
if box[0] < self.counting_region.centroid.x:
self.out_counts += 1
else:
self.in_counts += 1
elif len(self.count_reg_pts) == 2:
distance = Point(track_line[-1]).distance(self.counting_region)
if distance < self.line_dist_thresh and track_id not in self.counting_list:
self.counting_list.append(track_id)
if box[0] < self.counting_region.centroid.x:
self.out_counts += 1
else:
self.in_counts += 1
else:
for box, cls in zip(self.boxes, self.clss):
if self.shape == "circle":
center = (int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2))
radius = min(int(box[2]) - int(box[0]), int(box[3]) - int(box[1])) // 2
y, x = np.ogrid[0 : self.heatmap.shape[0], 0 : self.heatmap.shape[1]]
mask = (x - center[0]) ** 2 + (y - center[1]) ** 2 <= radius**2
self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += (
2 * mask[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])]
)
else:
self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += 2
# Normalize, apply colormap to heatmap and combine with original image
heatmap_normalized = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX)
heatmap_colored = cv2.applyColorMap(heatmap_normalized.astype(np.uint8), self.colormap)
        incount_label = f"In Count : {self.in_counts}"
        outcount_label = f"Out Count : {self.out_counts}"
        # Display counts based on user choice
        if not self.view_in_counts and not self.view_out_counts:
            counts_label = None
elif not self.view_in_counts:
counts_label = outcount_label
elif not self.view_out_counts:
counts_label = incount_label
else:
counts_label = f"{incount_label} {outcount_label}"
if self.count_reg_pts is not None and counts_label is not None:
self.annotator.count_labels(
counts=counts_label,
count_txt_size=self.count_txt_thickness,
txt_color=self.count_txt_color,
color=self.count_color,
)
self.im0 = cv2.addWeighted(self.im0, 1 - self.heatmap_alpha, heatmap_colored, self.heatmap_alpha, 0)
if self.env_check and self.view_img:
self.display_frames()
return self.im0
def display_frames(self):
"""Display frame."""
cv2.imshow("Ultralytics Heatmap", self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
if __name__ == "__main__":
Heatmap()
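    # Hedged usage sketch (added for illustration): the weights file and video path are
    # assumptions; the frame size is read from the capture so set_args gets matching imw/imh.
    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")  # hypothetical detection weights
    cap = cv2.VideoCapture("path/to/video.mp4")  # hypothetical video source (cv2 imported above)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    heatmap_obj = Heatmap()
    heatmap_obj.set_args(imw=w, imh=h, colormap=cv2.COLORMAP_JET, view_img=True)
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
        tracks = model.track(im0, persist=True, show=False)
        im0 = heatmap_obj.generate_heatmap(im0, tracks)
    cap.release()
    cv2.destroyAllWindows()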
| 10,928 | Python | .py | 231 | 35.060606 | 119 | 0.565042 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,916 | bot_sort.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/bot_sort.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import deque
import numpy as np
from .basetrack import TrackState
from .byte_tracker import BYTETracker, STrack
from .utils import matching
from .utils.gmc import GMC
from .utils.kalman_filter import KalmanFilterXYWH
class BOTrack(STrack):
"""
An extended version of the STrack class for YOLOv8, adding object tracking features.
Attributes:
shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack.
smooth_feat (np.ndarray): Smoothed feature vector.
curr_feat (np.ndarray): Current feature vector.
features (deque): A deque to store feature vectors with a maximum length defined by `feat_history`.
alpha (float): Smoothing factor for the exponential moving average of features.
mean (np.ndarray): The mean state of the Kalman filter.
covariance (np.ndarray): The covariance matrix of the Kalman filter.
Methods:
update_features(feat): Update features vector and smooth it using exponential moving average.
predict(): Predicts the mean and covariance using Kalman filter.
re_activate(new_track, frame_id, new_id): Reactivates a track with updated features and optionally new ID.
update(new_track, frame_id): Update the YOLOv8 instance with new track and frame ID.
tlwh: Property that gets the current position in tlwh format `(top left x, top left y, width, height)`.
multi_predict(stracks): Predicts the mean and covariance of multiple object tracks using shared Kalman filter.
convert_coords(tlwh): Converts tlwh bounding box coordinates to xywh format.
tlwh_to_xywh(tlwh): Convert bounding box to xywh format `(center x, center y, width, height)`.
Usage:
bo_track = BOTrack(tlwh, score, cls, feat)
bo_track.predict()
bo_track.update(new_track, frame_id)
"""
shared_kalman = KalmanFilterXYWH()
def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
"""Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features."""
super().__init__(tlwh, score, cls)
self.smooth_feat = None
self.curr_feat = None
if feat is not None:
self.update_features(feat)
self.features = deque([], maxlen=feat_history)
self.alpha = 0.9
def update_features(self, feat):
"""Update features vector and smooth it using exponential moving average."""
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
"""Predicts the mean and covariance using Kalman filter."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[6] = 0
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a track with updated features and optionally assigns a new ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().re_activate(new_track, frame_id, new_id)
def update(self, new_track, frame_id):
"""Update the YOLOv8 instance with new track and frame ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().update(new_track, frame_id)
@property
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y, width, height)`."""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[:2] -= ret[2:] / 2
return ret
@staticmethod
def multi_predict(stracks):
"""Predicts the mean and covariance of multiple object tracks using shared Kalman filter."""
if len(stracks) <= 0:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][6] = 0
multi_mean[i][7] = 0
multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
def convert_coords(self, tlwh):
"""Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format."""
return self.tlwh_to_xywh(tlwh)
@staticmethod
def tlwh_to_xywh(tlwh):
"""Convert bounding box to format `(center x, center y, width, height)`."""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
class BOTSORT(BYTETracker):
"""
An extended version of the BYTETracker class for YOLOv8, designed for object tracking with ReID and GMC algorithm.
Attributes:
proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections.
appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections.
encoder (object): Object to handle ReID embeddings, set to None if ReID is not enabled.
gmc (GMC): An instance of the GMC algorithm for data association.
args (object): Parsed command-line arguments containing tracking parameters.
Methods:
get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking.
init_track(dets, scores, cls, img): Initialize track with detections, scores, and classes.
get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID.
multi_predict(tracks): Predict and track multiple objects with YOLOv8 model.
Usage:
bot_sort = BOTSORT(args, frame_rate)
bot_sort.init_track(dets, scores, cls, img)
bot_sort.multi_predict(tracks)
Note:
The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args.
"""
def __init__(self, args, frame_rate=30):
"""Initialize YOLOv8 object with ReID module and GMC algorithm."""
super().__init__(args, frame_rate)
# ReID module
self.proximity_thresh = args.proximity_thresh
self.appearance_thresh = args.appearance_thresh
if args.with_reid:
            # BoT-SORT with ReID is not yet supported
self.encoder = None
self.gmc = GMC(method=args.gmc_method)
def get_kalmanfilter(self):
"""Returns an instance of KalmanFilterXYWH for object tracking."""
return KalmanFilterXYWH()
def init_track(self, dets, scores, cls, img=None):
"""Initialize track with detections, scores, and classes."""
if len(dets) == 0:
return []
if self.args.with_reid and self.encoder is not None:
features_keep = self.encoder.inference(img, dets)
return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
else:
return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
def get_dists(self, tracks, detections):
"""Get distances between tracks and detections using IoU and (optionally) ReID embeddings."""
dists = matching.iou_distance(tracks, detections)
dists_mask = dists > self.proximity_thresh
# TODO: mot20
# if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
if self.args.with_reid and self.encoder is not None:
emb_dists = matching.embedding_distance(tracks, detections) / 2.0
emb_dists[emb_dists > self.appearance_thresh] = 1.0
emb_dists[dists_mask] = 1.0
dists = np.minimum(dists, emb_dists)
return dists
def multi_predict(self, tracks):
"""Predict and track multiple objects with YOLOv8 model."""
BOTrack.multi_predict(tracks)
def reset(self):
"""Reset tracker."""
super().reset()
self.gmc.reset_params()
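# Hedged configuration sketch (added for illustration): BOTSORT expects a parsed args
# namespace like the one loaded from Ultralytics' botsort.yaml. The field values below
# mirror common defaults but are assumptions, not the shipped config.
#
#   from types import SimpleNamespace
#
#   args = SimpleNamespace(
#       tracker_type="botsort",
#       track_high_thresh=0.5,   # first-association detection threshold
#       track_low_thresh=0.1,    # second-association detection threshold
#       new_track_thresh=0.6,    # threshold to spawn a new track
#       track_buffer=30,         # frames to keep lost tracks alive
#       match_thresh=0.8,        # IoU matching threshold
#       gmc_method="sparseOptFlow",  # global motion compensation method
#       proximity_thresh=0.5,
#       appearance_thresh=0.25,
#       with_reid=False,
#   )
#   tracker = BOTSORT(args, frame_rate=30)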
| 8,601 | Python | .py | 165 | 43.545455 | 120 | 0.662302 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,917 | track.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/track.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
from functools import partial
from pathlib import Path
import torch
from ultralytics.utils import IterableSimpleNamespace, yaml_load
from ultralytics.utils.checks import check_yaml
from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker
# A mapping of tracker types to corresponding tracker classes
TRACKER_MAP = {"bytetrack": BYTETracker, "botsort": BOTSORT}
def on_predict_start(predictor: object, persist: bool = False) -> None:
"""
Initialize trackers for object tracking during prediction.
Args:
predictor (object): The predictor object to initialize trackers for.
persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
Raises:
AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
"""
if hasattr(predictor, "trackers") and persist:
return
tracker = check_yaml(predictor.args.tracker)
cfg = IterableSimpleNamespace(**yaml_load(tracker))
if cfg.tracker_type not in ["bytetrack", "botsort"]:
raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'")
trackers = []
for _ in range(predictor.dataset.bs):
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
trackers.append(tracker)
predictor.trackers = trackers
def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None:
"""
Postprocess detected boxes and update with object tracking.
Args:
predictor (object): The predictor object containing the predictions.
persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
"""
bs = predictor.dataset.bs
path, im0s = predictor.batch[:2]
is_obb = predictor.args.task == "obb"
for i in range(bs):
if not persist and predictor.vid_path[i] != str(predictor.save_dir / Path(path[i]).name): # new video
predictor.trackers[i].reset()
det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
if len(det) == 0:
continue
tracks = predictor.trackers[i].update(det, im0s[i])
if len(tracks) == 0:
continue
idx = tracks[:, -1].astype(int)
predictor.results[i] = predictor.results[i][idx]
        update_args = {"obb" if is_obb else "boxes": torch.as_tensor(tracks[:, :-1])}
predictor.results[i].update(**update_args)
def register_tracker(model: object, persist: bool) -> None:
"""
Register tracking callbacks to the model for object tracking during prediction.
Args:
model (object): The model object to register tracking callbacks for.
persist (bool): Whether to persist the trackers if they already exist.
"""
model.add_callback("on_predict_start", partial(on_predict_start, persist=persist))
model.add_callback("on_predict_postprocess_end", partial(on_predict_postprocess_end, persist=persist))
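# Hedged usage sketch (added for illustration): in practice these callbacks are registered
# automatically by `model.track()`; the weights file and video source below are assumptions.
#
#   from ultralytics import YOLO
#
#   model = YOLO("yolov8n.pt")
#   results = model.track(source="path/to/video.mp4", tracker="bytetrack.yaml", persist=True)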
| 3,091 | Python | .py | 63 | 42.857143 | 115 | 0.698471 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,918 | byte_tracker.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/byte_tracker.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
from .basetrack import BaseTrack, TrackState
from .utils import matching
from .utils.kalman_filter import KalmanFilterXYAH
from ..utils.ops import xywh2ltwh
from ..utils import LOGGER
class STrack(BaseTrack):
"""
Single object tracking representation that uses Kalman filtering for state estimation.
This class is responsible for storing all the information regarding individual tracklets and performs state updates
and predictions based on Kalman filter.
Attributes:
shared_kalman (KalmanFilterXYAH): Shared Kalman filter that is used across all STrack instances for prediction.
_tlwh (np.ndarray): Private attribute to store top-left corner coordinates and width and height of bounding box.
kalman_filter (KalmanFilterXYAH): Instance of Kalman filter used for this particular object track.
mean (np.ndarray): Mean state estimate vector.
covariance (np.ndarray): Covariance of state estimate.
is_activated (bool): Boolean flag indicating if the track has been activated.
score (float): Confidence score of the track.
tracklet_len (int): Length of the tracklet.
cls (any): Class label for the object.
idx (int): Index or identifier for the object.
frame_id (int): Current frame ID.
start_frame (int): Frame where the object was first detected.
Methods:
predict(): Predict the next state of the object using Kalman filter.
multi_predict(stracks): Predict the next states for multiple tracks.
multi_gmc(stracks, H): Update multiple track states using a homography matrix.
activate(kalman_filter, frame_id): Activate a new tracklet.
re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.
update(new_track, frame_id): Update the state of a matched track.
convert_coords(tlwh): Convert bounding box to x-y-aspect-height format.
tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.
"""
shared_kalman = KalmanFilterXYAH()
def __init__(self, xywh, score, cls):
"""Initialize new STrack instance."""
super().__init__()
# xywh+idx or xywha+idx
assert len(xywh) in [5, 6], f"expected 5 or 6 values but got {len(xywh)}"
self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.cls = cls
self.idx = xywh[-1]
self.angle = xywh[4] if len(xywh) == 6 else None
def predict(self):
"""Predicts mean and covariance using Kalman filter."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
"""Perform multi-object predictive tracking using Kalman filter for given stracks."""
if len(stracks) <= 0:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
@staticmethod
def multi_gmc(stracks, H=np.eye(2, 3)):
"""Update state tracks positions and covariances using a homography matrix."""
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
R = H[:2, :2]
R8x8 = np.kron(np.eye(4, dtype=float), R)
t = H[:2, 2]
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
mean = R8x8.dot(mean)
mean[:2] += t
cov = R8x8.dot(cov).dot(R8x8.transpose())
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet."""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
if frame_id == 1:
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a previously lost track with a new detection."""
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.convert_coords(new_track.tlwh)
)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
self.score = new_track.score
self.cls = new_track.cls
self.angle = new_track.angle
self.idx = new_track.idx
def update(self, new_track, frame_id):
"""
Update the state of a matched track.
Args:
new_track (STrack): The new track containing updated information.
frame_id (int): The ID of the current frame.
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.convert_coords(new_tlwh)
)
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
self.cls = new_track.cls
self.angle = new_track.angle
self.idx = new_track.idx
def convert_coords(self, tlwh):
"""Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
return self.tlwh_to_xyah(tlwh)
@property
def tlwh(self):
"""Get current position in bounding box format (top left x, top left y, width, height)."""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
def xyxy(self):
"""Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right)."""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format (center x, center y, aspect ratio, height), where the aspect ratio is width /
height.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
@property
def xywh(self):
"""Get current position in bounding box format (center x, center y, width, height)."""
ret = np.asarray(self.tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
@property
def xywha(self):
"""Get current position in bounding box format (center x, center y, width, height, angle)."""
if self.angle is None:
            LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
return self.xywh
return np.concatenate([self.xywh, self.angle[None]])
@property
def result(self):
"""Get current tracking results."""
coords = self.xyxy if self.angle is None else self.xywha
return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]
def __repr__(self):
"""Return a string representation of the BYTETracker object with start and end frames and track ID."""
return f"OT_{self.track_id}_({self.start_frame}-{self.end_frame})"
class BYTETracker:
"""
BYTETracker: A tracking algorithm built on top of YOLOv8 for object detection and tracking.
The class is responsible for initializing, updating, and managing the tracks for detected objects in a video
sequence. It maintains the state of tracked, lost, and removed tracks over frames, utilizes Kalman filtering for
predicting the new object locations, and performs data association.
Attributes:
tracked_stracks (list[STrack]): List of successfully activated tracks.
lost_stracks (list[STrack]): List of lost tracks.
removed_stracks (list[STrack]): List of removed tracks.
frame_id (int): The current frame ID.
args (namespace): Command-line arguments.
max_time_lost (int): The maximum frames for a track to be considered as 'lost'.
kalman_filter (object): Kalman Filter object.
Methods:
update(results, img=None): Updates object tracker with new detections.
get_kalmanfilter(): Returns a Kalman filter object for tracking bounding boxes.
init_track(dets, scores, cls, img=None): Initialize object tracking with detections.
get_dists(tracks, detections): Calculates the distance between tracks and detections.
multi_predict(tracks): Predicts the location of tracks.
reset_id(): Resets the ID counter of STrack.
joint_stracks(tlista, tlistb): Combines two lists of stracks.
sub_stracks(tlista, tlistb): Filters out the stracks present in the second list from the first list.
remove_duplicate_stracks(stracksa, stracksb): Removes duplicate stracks based on IOU.
"""
def __init__(self, args, frame_rate=30):
"""Initialize a YOLOv8 object to track objects with given arguments and frame rate."""
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.args = args
self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)
self.kalman_filter = self.get_kalmanfilter()
self.reset_id()
def update(self, results, img=None):
"""Updates object tracker with new detections and returns tracked object bounding boxes."""
self.frame_id += 1
activated_stracks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
scores = results.conf
bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
# Add index
bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
cls = results.cls
remain_inds = scores > self.args.track_high_thresh
inds_low = scores > self.args.track_low_thresh
inds_high = scores < self.args.track_high_thresh
inds_second = np.logical_and(inds_low, inds_high)
dets_second = bboxes[inds_second]
dets = bboxes[remain_inds]
scores_keep = scores[remain_inds]
scores_second = scores[inds_second]
cls_keep = cls[remain_inds]
cls_second = cls[inds_second]
detections = self.init_track(dets, scores_keep, cls_keep, img)
# Add newly detected tracklets to tracked_stracks
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
# Step 2: First association, with high score detection boxes
strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
self.multi_predict(strack_pool)
if hasattr(self, "gmc") and img is not None:
warp = self.gmc.apply(img, dets)
STrack.multi_gmc(strack_pool, warp)
STrack.multi_gmc(unconfirmed, warp)
dists = self.get_dists(strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
        # Step 3: Second association, matching remaining tracks against the low-score detection boxes
detections_second = self.init_track(dets_second, scores_second, cls_second, img)
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
# TODO
dists = matching.iou_distance(r_tracked_stracks, detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if track.state != TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
# Deal with unconfirmed tracks, usually tracks with only one beginning frame
detections = [detections[i] for i in u_detection]
dists = self.get_dists(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_stracks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
# Step 4: Init new stracks
for inew in u_detection:
track = detections[inew]
if track.score < self.args.new_track_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_stracks.append(track)
# Step 5: Update state
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)
self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
self.removed_stracks.extend(removed_stracks)
if len(self.removed_stracks) > 1000:
            self.removed_stracks = self.removed_stracks[-999:]  # keep at most the 999 most recent removed stracks
return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
def get_kalmanfilter(self):
"""Returns a Kalman filter object for tracking bounding boxes."""
return KalmanFilterXYAH()
def init_track(self, dets, scores, cls, img=None):
"""Initialize object tracking with detections and scores using STrack algorithm."""
return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections
def get_dists(self, tracks, detections):
"""Calculates the distance between tracks and detections using IOU and fuses scores."""
dists = matching.iou_distance(tracks, detections)
# TODO: mot20
# if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
return dists
def multi_predict(self, tracks):
"""Returns the predicted tracks using the YOLOv8 network."""
STrack.multi_predict(tracks)
@staticmethod
def reset_id():
"""Resets the ID counter of STrack."""
STrack.reset_id()
def reset(self):
"""Reset tracker."""
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.kalman_filter = self.get_kalmanfilter()
self.reset_id()
@staticmethod
def joint_stracks(tlista, tlistb):
"""Combine two lists of stracks into a single one."""
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
@staticmethod
def sub_stracks(tlista, tlistb):
"""DEPRECATED CODE in https://github.com/ultralytics/ultralytics/pull/1890/
stracks = {t.track_id: t for t in tlista}
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
"""
track_ids_b = {t.track_id for t in tlistb}
return [t for t in tlista if t.track_id not in track_ids_b]
@staticmethod
def remove_duplicate_stracks(stracksa, stracksb):
"""Remove duplicate stracks with non-maximum IOU distance."""
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
dupa, dupb = [], []
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i, t in enumerate(stracksa) if i not in dupa]
resb = [t for i, t in enumerate(stracksb) if i not in dupb]
return resa, resb
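# Hedged usage sketch (added for illustration): BYTETracker.update() expects a Boxes-like
# object exposing .conf, .xywh (or .xywhr) and .cls arrays; the mock object and all
# threshold values below are assumptions for demonstration.
#
#   from types import SimpleNamespace
#
#   args = SimpleNamespace(
#       track_high_thresh=0.5, track_low_thresh=0.1, new_track_thresh=0.6,
#       track_buffer=30, match_thresh=0.8,
#   )
#   tracker = BYTETracker(args, frame_rate=30)
#   det = SimpleNamespace(  # two mock detections in (cx, cy, w, h) format
#       conf=np.array([0.9, 0.8]),
#       xywh=np.array([[100.0, 100.0, 40.0, 80.0], [300.0, 200.0, 50.0, 90.0]]),
#       cls=np.array([0.0, 0.0]),
#   )
#   online = tracker.update(det)  # rows: xyxy + track_id, score, cls, detection index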
| 18,871 | Python | .py | 387 | 39.242894 | 120 | 0.633907 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,919 | __init__.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/__init__.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker
from .track import register_tracker
__all__ = "register_tracker", "BOTSORT", "BYTETracker" # allow simpler import
| 227 | Python | .py | 5 | 44 | 78 | 0.777273 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,920 | basetrack.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/basetrack.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
"""This module defines the base classes and structures for object tracking in YOLO."""
from collections import OrderedDict
import numpy as np
class TrackState:
"""
Enumeration class representing the possible states of an object being tracked.
Attributes:
New (int): State when the object is newly detected.
Tracked (int): State when the object is successfully tracked in subsequent frames.
Lost (int): State when the object is no longer tracked.
Removed (int): State when the object is removed from tracking.
"""
New = 0
Tracked = 1
Lost = 2
Removed = 3
class BaseTrack:
"""
Base class for object tracking, providing foundational attributes and methods.
Attributes:
_count (int): Class-level counter for unique track IDs.
track_id (int): Unique identifier for the track.
is_activated (bool): Flag indicating whether the track is currently active.
state (TrackState): Current state of the track.
history (OrderedDict): Ordered history of the track's states.
features (list): List of features extracted from the object for tracking.
curr_feature (any): The current feature of the object being tracked.
score (float): The confidence score of the tracking.
start_frame (int): The frame number where tracking started.
frame_id (int): The most recent frame ID processed by the track.
time_since_update (int): Frames passed since the last update.
location (tuple): The location of the object in the context of multi-camera tracking.
Methods:
end_frame: Returns the ID of the last frame where the object was tracked.
next_id: Increments and returns the next global track ID.
activate: Abstract method to activate the track.
predict: Abstract method to predict the next state of the track.
update: Abstract method to update the track with new data.
mark_lost: Marks the track as lost.
mark_removed: Marks the track as removed.
reset_id: Resets the global track ID counter.
"""
_count = 0
def __init__(self):
"""Initializes a new track with unique ID and foundational tracking attributes."""
self.track_id = 0
self.is_activated = False
self.state = TrackState.New
self.history = OrderedDict()
self.features = []
self.curr_feature = None
self.score = 0
self.start_frame = 0
self.frame_id = 0
self.time_since_update = 0
self.location = (np.inf, np.inf)
@property
def end_frame(self):
"""Return the last frame ID of the track."""
return self.frame_id
@staticmethod
def next_id():
"""Increment and return the global track ID counter."""
BaseTrack._count += 1
return BaseTrack._count
def activate(self, *args):
"""Abstract method to activate the track with provided arguments."""
raise NotImplementedError
def predict(self):
"""Abstract method to predict the next state of the track."""
raise NotImplementedError
def update(self, *args, **kwargs):
"""Abstract method to update the track with new observations."""
raise NotImplementedError
def mark_lost(self):
"""Mark the track as lost."""
self.state = TrackState.Lost
def mark_removed(self):
"""Mark the track as removed."""
self.state = TrackState.Removed
@staticmethod
def reset_id():
"""Reset the global track ID counter."""
BaseTrack._count = 0
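# Hedged subclassing sketch (added for illustration): concrete trackers such as STrack
# override the abstract methods; the minimal toy subclass below illustrates the contract
# and is not part of the library.
#
#   class CentroidTrack(BaseTrack):
#       """Toy track that stores a 2D centroid instead of a full Kalman state."""
#
#       def activate(self, centroid, frame_id):
#           self.track_id = self.next_id()
#           self.location = centroid
#           self.state = TrackState.Tracked
#           self.is_activated = True
#           self.start_frame = self.frame_id = frame_id
#
#       def predict(self):
#           pass  # constant-position model: nothing to extrapolate
#
#       def update(self, centroid, frame_id):
#           self.location = centroid
#           self.frame_id = frame_id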
| 3,675 | Python | .py | 85 | 35.788235 | 93 | 0.667787 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,921 | matching.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/utils/matching.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from ultralytics.utils.metrics import bbox_ioa, batch_probiou
try:
import lap # for linear_assignment
assert lap.__version__ # verify package is not directory
except (ImportError, AssertionError, AttributeError):
from ultralytics.utils.checks import check_requirements
check_requirements("lapx>=0.5.2") # update to lap package from https://github.com/rathaROG/lapx
import lap
def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple:
"""
Perform linear assignment using scipy or lap.lapjv.
Args:
cost_matrix (np.ndarray): The matrix containing cost values for assignments.
thresh (float): Threshold for considering an assignment valid.
use_lap (bool, optional): Whether to use lap.lapjv. Defaults to True.
Returns:
Tuple with:
- matched indices
- unmatched indices from 'a'
- unmatched indices from 'b'
"""
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
if use_lap:
# Use lap.lapjv
# https://github.com/gatagat/lap
_, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
else:
# Use scipy.optimize.linear_sum_assignment
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
x, y = scipy.optimize.linear_sum_assignment(cost_matrix) # row x, col y
matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh])
if len(matches) == 0:
unmatched_a = list(np.arange(cost_matrix.shape[0]))
unmatched_b = list(np.arange(cost_matrix.shape[1]))
else:
unmatched_a = list(set(np.arange(cost_matrix.shape[0])) - set(matches[:, 0]))
unmatched_b = list(set(np.arange(cost_matrix.shape[1])) - set(matches[:, 1]))
return matches, unmatched_a, unmatched_b
def iou_distance(atracks: list, btracks: list) -> np.ndarray:
"""
Compute cost based on Intersection over Union (IoU) between tracks.
Args:
atracks (list[STrack] | list[np.ndarray]): List of tracks 'a' or bounding boxes.
btracks (list[STrack] | list[np.ndarray]): List of tracks 'b' or bounding boxes.
Returns:
(np.ndarray): Cost matrix computed based on IoU.
"""
    if (atracks and isinstance(atracks[0], np.ndarray)) or (btracks and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks]
btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks]
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
if len(atlbrs) and len(btlbrs):
if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5:
ious = batch_probiou(
np.ascontiguousarray(atlbrs, dtype=np.float32),
np.ascontiguousarray(btlbrs, dtype=np.float32),
).numpy()
else:
ious = bbox_ioa(
np.ascontiguousarray(atlbrs, dtype=np.float32),
np.ascontiguousarray(btlbrs, dtype=np.float32),
iou=True,
)
return 1 - ious # cost matrix
def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -> np.ndarray:
"""
Compute distance between tracks and detections based on embeddings.
Args:
tracks (list[STrack]): List of tracks.
detections (list[BaseTrack]): List of detections.
metric (str, optional): Metric for distance computation. Defaults to 'cosine'.
Returns:
(np.ndarray): Cost matrix computed based on embeddings.
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
# for i, track in enumerate(tracks):
# cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
return cost_matrix
def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray:
"""
Fuses cost matrix with detection scores to produce a single similarity matrix.
Args:
cost_matrix (np.ndarray): The matrix containing cost values for assignments.
detections (list[BaseTrack]): List of detections with scores.
Returns:
(np.ndarray): Fused similarity matrix.
"""
if cost_matrix.size == 0:
return cost_matrix
iou_sim = 1 - cost_matrix
det_scores = np.array([det.score for det in detections])
det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
fuse_sim = iou_sim * det_scores
return 1 - fuse_sim # fuse_cost
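if __name__ == "__main__":
    # Hedged self-check (added for illustration; not part of the original module): a tiny
    # cost matrix showing how linear_assignment pairs rows with columns under a threshold.
    cost = np.array([[0.1, 0.9], [0.8, 0.2], [0.7, 0.6]], dtype=np.float32)
    matches, unmatched_a, unmatched_b = linear_assignment(cost, thresh=0.5)
    print(matches)  # rows 0 and 1 match columns 0 and 1 (costs 0.1 and 0.2 are below 0.5)
    print(unmatched_a)  # row 2 stays unmatched: its cheapest cost (0.6) exceeds the threshold
    print(unmatched_b)  # no unmatched columns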
| 5,404 | Python | .py | 110 | 41.654545 | 114 | 0.657615 | arojsubedi/Improved-YOLOv8s | 8 | 5 | 0 | AGPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,922 | kalman_filter.py | arojsubedi_Improved-YOLOv8s/ultralytics/trackers/utils/kalman_filter.py | # Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
import scipy.linalg
class KalmanFilterXYAH:
"""
For bytetrack. A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y), aspect
ratio a, height h, and their respective velocities.
Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct
observation of the state space (linear observation model).
"""
def __init__(self):
"""Initialize Kalman filter model matrices with motion and observation uncertainty weights."""
ndim, dt = 4, 1.0
# Create Kalman filter model matrices
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
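        # Added note: with ndim=4 and dt=1 these form a constant-velocity model in block form:
        #   _motion_mat (8x8) = [[I4, dt*I4], [0, I4]]  so that x_{k+1} = F @ x_k
        #   _update_mat (4x8) = [I4, 0]                 so that z_k = H @ x_k (position observed directly)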
# Motion and observation uncertainty are chosen relative to the current state estimate. These weights control
# the amount of uncertainty in the model.
self._std_weight_position = 1.0 / 20
self._std_weight_velocity = 1.0 / 160
def initiate(self, measurement: np.ndarray) -> tuple:
"""
Create track from unassociated measurement.
Args:
measurement (ndarray): Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a,
and height h.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
the new track. Unobserved velocities are initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[3],
2 * self._std_weight_position * measurement[3],
1e-2,
2 * self._std_weight_position * measurement[3],
10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[3],
1e-5,
10 * self._std_weight_velocity * measurement[3],
]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
"""
Run Kalman filter prediction step.
Args:
mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step.
covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[3],
self._std_weight_position * mean[3],
1e-2,
self._std_weight_position * mean[3],
]
std_vel = [
self._std_weight_velocity * mean[3],
self._std_weight_velocity * mean[3],
1e-5,
self._std_weight_velocity * mean[3],
]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
"""
Project state distribution to measurement space.
Args:
mean (ndarray): The state's mean vector (8 dimensional array).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
Returns:
(tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.
"""
std = [
self._std_weight_position * mean[3],
self._std_weight_position * mean[3],
1e-1,
self._std_weight_position * mean[3],
]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
"""
Run Kalman filter prediction step (Vectorized version).
Args:
mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 3],
self._std_weight_position * mean[:, 3],
1e-2 * np.ones_like(mean[:, 3]),
self._std_weight_position * mean[:, 3],
]
std_vel = [
self._std_weight_velocity * mean[:, 3],
self._std_weight_velocity * mean[:, 3],
1e-5 * np.ones_like(mean[:, 3]),
self._std_weight_velocity * mean[:, 3],
]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean: np.ndarray, covariance: np.ndarray, measurement: np.ndarray) -> tuple:
"""
Run Kalman filter correction step.
Args:
mean (ndarray): The predicted state's mean vector (8 dimensional).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
measurement (ndarray): The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center
position, a the aspect ratio, and h the height of the bounding box.
Returns:
(tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False
).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(
self,
mean: np.ndarray,
covariance: np.ndarray,
measurements: np.ndarray,
only_position: bool = False,
metric: str = "maha",
) -> np.ndarray:
"""
Compute gating distance between state distribution and measurements. A suitable distance threshold can be
obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom,
otherwise 2.
Args:
mean (ndarray): Mean vector over the state distribution (8 dimensional).
covariance (ndarray): Covariance of the state distribution (8x8 dimensional).
measurements (ndarray): An Nx4 matrix of N measurements, each in format (x, y, a, h) where (x, y)
is the bounding box center position, a the aspect ratio, and h the height.
only_position (bool, optional): If True, distance computation is done with respect to the bounding box
center position only. Defaults to False.
metric (str, optional): The metric to use for calculating the distance. Options are 'gaussian' for the
squared Euclidean distance and 'maha' for the squared Mahalanobis distance. Defaults to 'maha'.
Returns:
(np.ndarray): Returns an array of length N, where the i-th element contains the squared distance between
(mean, covariance) and `measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
d = measurements - mean
if metric == "gaussian":
return np.sum(d * d, axis=1)
elif metric == "maha":
cholesky_factor = np.linalg.cholesky(covariance)
z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
return np.sum(z * z, axis=0) # square maha
else:
raise ValueError("Invalid distance metric")
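# Hedged usage sketch (illustrative only; measurement values are placeholders):
# one initiate -> predict -> update cycle of the XYAH filter.
def _kalman_xyah_demo():
    kf = KalmanFilterXYAH()
    z0 = np.array([320.0, 240.0, 0.5, 100.0])  # (x, y, aspect ratio, height)
    mean, cov = kf.initiate(z0)                # 8-dim state, zero velocities
    mean, cov = kf.predict(mean, cov)          # constant-velocity prediction
    z1 = np.array([322.0, 243.0, 0.5, 101.0])  # next-frame detection
    return kf.update(mean, cov, z1)            # measurement-corrected state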
class KalmanFilterXYWH(KalmanFilterXYAH):
"""
For BoT-SORT. A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y), width
w, height h, and their respective velocities.
Object motion follows a constant velocity model. The bounding box location (x, y, w, h) is taken as direct
observation of the state space (linear observation model).
"""
def initiate(self, measurement: np.ndarray) -> tuple:
"""
Create track from unassociated measurement.
Args:
measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and height.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
the new track. Unobserved velocities are initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[2],
2 * self._std_weight_position * measurement[3],
2 * self._std_weight_position * measurement[2],
2 * self._std_weight_position * measurement[3],
10 * self._std_weight_velocity * measurement[2],
10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[2],
10 * self._std_weight_velocity * measurement[3],
]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance) -> tuple:
"""
Run Kalman filter prediction step.
Args:
mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step.
covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
]
std_vel = [
self._std_weight_velocity * mean[2],
self._std_weight_velocity * mean[3],
self._std_weight_velocity * mean[2],
self._std_weight_velocity * mean[3],
]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance) -> tuple:
"""
Project state distribution to measurement space.
Args:
mean (ndarray): The state's mean vector (8 dimensional array).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
Returns:
(tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.
"""
std = [
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean, covariance) -> tuple:
"""
Run Kalman filter prediction step (Vectorized version).
Args:
mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 2],
self._std_weight_position * mean[:, 3],
self._std_weight_position * mean[:, 2],
self._std_weight_position * mean[:, 3],
]
std_vel = [
self._std_weight_velocity * mean[:, 2],
self._std_weight_velocity * mean[:, 3],
self._std_weight_velocity * mean[:, 2],
self._std_weight_velocity * mean[:, 3],
]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean, covariance, measurement) -> tuple:
"""
Run Kalman filter correction step.
Args:
mean (ndarray): The predicted state's mean vector (8 dimensional).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
measurement (ndarray): The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center
position, w the width, and h the height of the bounding box.
Returns:
(tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.
"""
return super().update(mean, covariance, measurement)
# ==== file: arojsubedi_Improved-YOLOv8s/ultralytics/trackers/utils/gmc.py ====
# Ultralytics YOLO 🚀, AGPL-3.0 license
import copy
import cv2
import numpy as np
from ultralytics.utils import LOGGER
class GMC:
"""
Generalized Motion Compensation (GMC) class for tracking and object detection in video frames.
This class provides methods for tracking and detecting objects based on several tracking algorithms including ORB,
SIFT, ECC, and Sparse Optical Flow. It also supports downscaling of frames for computational efficiency.
Attributes:
method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
downscale (int): Factor by which to downscale the frames for processing.
prevFrame (np.ndarray): Stores the previous frame for tracking.
prevKeyPoints (list): Stores the keypoints from the previous frame.
prevDescriptors (np.ndarray): Stores the descriptors from the previous frame.
initializedFirstFrame (bool): Flag to indicate if the first frame has been processed.
Methods:
__init__(self, method='sparseOptFlow', downscale=2): Initializes a GMC object with the specified method
and downscale factor.
apply(self, raw_frame, detections=None): Applies the chosen method to a raw frame and optionally uses
provided detections.
applyEcc(self, raw_frame, detections=None): Applies the ECC algorithm to a raw frame.
applyFeatures(self, raw_frame, detections=None): Applies feature-based methods like ORB or SIFT to a raw frame.
applySparseOptFlow(self, raw_frame, detections=None): Applies the Sparse Optical Flow method to a raw frame.
"""
def __init__(self, method: str = "sparseOptFlow", downscale: int = 2) -> None:
"""
Initialize a video tracker with specified parameters.
Args:
method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
downscale (int): Downscale factor for processing frames.
"""
super().__init__()
self.method = method
self.downscale = max(1, int(downscale))
if self.method == "orb":
self.detector = cv2.FastFeatureDetector_create(20)
self.extractor = cv2.ORB_create()
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
elif self.method == "sift":
self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.matcher = cv2.BFMatcher(cv2.NORM_L2)
elif self.method == "ecc":
number_of_iterations = 5000
termination_eps = 1e-6
self.warp_mode = cv2.MOTION_EUCLIDEAN
self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
elif self.method == "sparseOptFlow":
self.feature_params = dict(
maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
)
elif self.method in {"none", "None", None}:
self.method = None
else:
raise ValueError(f"Error: Unknown GMC method:{method}")
self.prevFrame = None
self.prevKeyPoints = None
self.prevDescriptors = None
self.initializedFirstFrame = False
    def apply(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Apply the chosen motion-compensation method to a raw frame.
        Args:
            raw_frame (np.ndarray): The raw BGR frame to be processed.
            detections (list): Optional detections whose regions are masked out during feature extraction.
        Returns:
            (np.ndarray): The estimated 2x3 affine warp matrix between the previous and current frame
                (identity for the first frame or when the method is disabled).
        Examples:
            >>> gmc = GMC(method="sparseOptFlow")
            >>> frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
            >>> gmc.apply(frame).shape  # identity eye(2, 3) on the first frame
            (2, 3)
        """
if self.method in ["orb", "sift"]:
return self.applyFeatures(raw_frame, detections)
elif self.method == "ecc":
return self.applyEcc(raw_frame)
elif self.method == "sparseOptFlow":
return self.applySparseOptFlow(raw_frame)
else:
return np.eye(2, 3)
    def applyEcc(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply the ECC algorithm to a raw frame.
        Args:
            raw_frame (np.ndarray): The raw BGR frame to be processed.
        Returns:
            (np.ndarray): The estimated 2x3 warp matrix (identity for the first frame or on failure).
        Examples:
            >>> gmc = GMC(method="ecc")
            >>> frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
            >>> gmc.applyEcc(frame).shape  # identity eye(2, 3) on the first frame
            (2, 3)
        """
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3, dtype=np.float32)
# Downscale image
if self.downscale > 1.0:
frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
# Initialization done
self.initializedFirstFrame = True
return H
# Run the ECC algorithm. The results are stored in warp_matrix.
# (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
try:
(_, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
except Exception as e:
            LOGGER.warning(f"WARNING: find transform failed. Setting warp as identity: {e}")
return H
    def applyFeatures(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Apply feature-based methods like ORB or SIFT to a raw frame.
        Args:
            raw_frame (np.ndarray): The raw BGR frame to be processed.
            detections (list): Optional detections whose boxes are masked out of the keypoint search.
        Returns:
            (np.ndarray): The estimated 2x3 affine warp matrix (identity for the first frame).
        Examples:
            >>> gmc = GMC(method="orb")
            >>> frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
            >>> gmc.applyFeatures(frame).shape  # identity eye(2, 3) on the first frame
            (2, 3)
        """
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3)
# Downscale image
if self.downscale > 1.0:
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# Find the keypoints
mask = np.zeros_like(frame)
mask[int(0.02 * height) : int(0.98 * height), int(0.02 * width) : int(0.98 * width)] = 255
if detections is not None:
for det in detections:
tlbr = (det[:4] / self.downscale).astype(np.int_)
mask[tlbr[1] : tlbr[3], tlbr[0] : tlbr[2]] = 0
keypoints = self.detector.detect(frame, mask)
# Compute the descriptors
keypoints, descriptors = self.extractor.compute(frame, keypoints)
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
# Initialization done
self.initializedFirstFrame = True
return H
# Match descriptors
knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)
# Filter matches based on smallest spatial distance
matches = []
spatialDistances = []
maxSpatialDistance = 0.25 * np.array([width, height])
# Handle empty matches case
if len(knnMatches) == 0:
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
for m, n in knnMatches:
if m.distance < 0.9 * n.distance:
prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
currKeyPointLocation = keypoints[m.trainIdx].pt
spatialDistance = (
prevKeyPointLocation[0] - currKeyPointLocation[0],
prevKeyPointLocation[1] - currKeyPointLocation[1],
)
if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and (
np.abs(spatialDistance[1]) < maxSpatialDistance[1]
):
spatialDistances.append(spatialDistance)
matches.append(m)
meanSpatialDistances = np.mean(spatialDistances, 0)
stdSpatialDistances = np.std(spatialDistances, 0)
inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances
goodMatches = []
prevPoints = []
currPoints = []
for i in range(len(matches)):
if inliers[i, 0] and inliers[i, 1]:
goodMatches.append(matches[i])
prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
currPoints.append(keypoints[matches[i].trainIdx].pt)
prevPoints = np.array(prevPoints)
currPoints = np.array(currPoints)
# Draw the keypoint matches on the output image
# if False:
# import matplotlib.pyplot as plt
# matches_img = np.hstack((self.prevFrame, frame))
# matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
# W = self.prevFrame.shape[1]
# for m in goodMatches:
# prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
# curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
# curr_pt[0] += W
# color = np.random.randint(0, 255, 3)
# color = (int(color[0]), int(color[1]), int(color[2]))
#
# matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
# matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
# matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)
#
# plt.figure()
# plt.imshow(matches_img)
# plt.show()
# Find rigid matrix
if prevPoints.shape[0] > 4:
H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
# Handle downscale
if self.downscale > 1.0:
H[0, 2] *= self.downscale
H[1, 2] *= self.downscale
else:
LOGGER.warning("WARNING: not enough matching points")
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
    def applySparseOptFlow(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply the Sparse Optical Flow method to a raw frame.
        Args:
            raw_frame (np.ndarray): The raw BGR frame to be processed.
        Returns:
            (np.ndarray): The estimated 2x3 affine warp matrix (identity for the first frame).
        Examples:
            >>> gmc = GMC(method="sparseOptFlow")
            >>> frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
            >>> gmc.applySparseOptFlow(frame).shape  # identity eye(2, 3) on the first frame
            (2, 3)
        """
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3)
# Downscale image
if self.downscale > 1.0:
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
# Find the keypoints
keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)
# Handle first frame
if not self.initializedFirstFrame:
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.initializedFirstFrame = True
return H
# Find correspondences
matchedKeypoints, status, _ = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)
# Leave good correspondences only
prevPoints = []
currPoints = []
for i in range(len(status)):
if status[i]:
prevPoints.append(self.prevKeyPoints[i])
currPoints.append(matchedKeypoints[i])
prevPoints = np.array(prevPoints)
currPoints = np.array(currPoints)
# Find rigid matrix
        if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == currPoints.shape[0]):
H, _ = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
if self.downscale > 1.0:
H[0, 2] *= self.downscale
H[1, 2] *= self.downscale
else:
LOGGER.warning("WARNING: not enough matching points")
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
return H
def reset_params(self) -> None:
"""Reset parameters."""
self.prevFrame = None
self.prevKeyPoints = None
self.prevDescriptors = None
self.initializedFirstFrame = False
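# Hedged usage sketch (illustrative only; 'frames' is an assumed iterable of
# HxWx3 BGR uint8 images): estimate camera motion across consecutive frames.
def _gmc_demo(frames):
    gmc = GMC(method="sparseOptFlow", downscale=2)
    # The first call returns the identity eye(2, 3); later calls return the
    # 2x3 warp mapping the previous frame onto the current one.
    return [gmc.apply(frame) for frame in frames]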
# ==== file: arojsubedi_Improved-YOLOv8s/docker/Dockerfile-python ====
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments
# Use the official Python 3.10 slim-bookworm as base image
FROM python:3.10-slim-bookworm
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
RUN apt update \
&& apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
# Create working directory
WORKDIR /usr/src/ultralytics
# Copy contents
# COPY . /usr/src/ultralytics # git permission issues inside container
RUN git clone https://github.com/ultralytics/ultralytics -b main /usr/src/ultralytics
ADD https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8n.pt /usr/src/ultralytics/
# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' to avoid the 'externally-managed-environment' Ubuntu nightly error
# RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
# Install pip packages
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu
# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
# Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN pip install --no-cache paddlepaddle>=2.6.0 x2paddle
# Remove exported models
RUN rm -rf tmp
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-python && sudo docker build -f docker/Dockerfile-python -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-python && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/track.py ====
# limit the number of cpus used by high performance libraries
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import sys
sys.path.insert(0, './yolov5')
from yolov5.models.experimental import attempt_load
from yolov5.utils.downloads import attempt_download
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.dataloaders import LoadImages, LoadStreams
from yolov5.utils.general import LOGGER, check_img_size, increment_path, non_max_suppression, scale_boxes, check_imshow, xyxy2xywh
from yolov5.utils.torch_utils import select_device, time_sync
from yolov5.utils.plots import Annotator, colors
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
def detect(opt):
out, source, yolo_weights, deep_sort_weights, show_vid, save_vid, save_txt, imgsz, evaluate, half = \
opt.output, opt.source, opt.yolo_weights, opt.deep_sort_weights, opt.show_vid, opt.save_vid, \
opt.save_txt, opt.imgsz, opt.evaluate, opt.half
webcam = source == '0' or source.startswith(
'rtsp') or source.startswith('http') or source.endswith('.txt')
# initialize deepsort
cfg = get_config()
cfg.merge_from_file(opt.config_deepsort)
attempt_download(deep_sort_weights, repo='mikel-brostrom/Yolov5_DeepSort_Pytorch')
deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
use_cuda=True)
# Initialize
device = select_device(opt.device)
half &= device.type != 'cpu' # half precision only supported on CUDA
# The MOT16 evaluation runs multiple inference streams in parallel, each one writing to
# its own .txt file. Hence, in that case, the output folder is not restored
if not evaluate:
if os.path.exists(out):
            shutil.rmtree(out)  # delete output folder
os.makedirs(out) # make new output folder
# Load model
device = select_device(device)
model = DetectMultiBackend(opt.yolo_weights, device=device, dnn=opt.dnn)
stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
imgsz = check_img_size(imgsz, s=stride) # check image size
# Half
half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
if pt:
model.model.half() if half else model.model.float()
# Set Dataloader
vid_path, vid_writer = None, None
# Check if environment supports image displays
if show_vid:
show_vid = check_imshow()
# Dataloader
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = len(dataset) # batch_size
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
bs = 1 # batch_size
vid_path, vid_writer = [None] * bs, [None] * bs
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
save_path = str(Path(out))
    # Extract the file name from source
txt_file_name = os.path.basename(source).split('.')[0]
    # Build the txt file path with os.path.join
txt_path = os.path.join(str(Path(out)), txt_file_name + '.txt')
if pt and device.type != 'cpu':
model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup
dt, seen = [0.0, 0.0, 0.0], 0
for frame_idx, (path, img, im0s, vid_cap, s) in enumerate(dataset):
t1 = time_sync()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
t2 = time_sync()
dt[0] += t2 - t1
# Inference
        visualize = increment_path(Path(out) / Path(path).stem, mkdir=True) if opt.visualize else False
pred = model(img, augment=opt.augment, visualize=visualize)
t3 = time_sync()
dt[1] += t3 - t2
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms, max_det=opt.max_det)
dt[2] += time_sync() - t3
# Process detections
for i, det in enumerate(pred): # detections per image
seen += 1
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f'{i}: '
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
s += '%gx%g ' % img.shape[2:] # print string
save_path = str(Path(out) / Path(p).name)
annotator = Annotator(im0, line_width=2, pil=not ascii)
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_boxes(
img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
xywhs = xyxy2xywh(det[:, 0:4])
confs = det[:, 4]
clss = det[:, 5]
# pass detections to deepsort
outputs = deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), im0)
# draw boxes for visualization
if len(outputs) > 0:
for j, (output, conf) in enumerate(zip(outputs, confs)):
bboxes = output[0:4]
id = output[4]
cls = output[5]
c = int(cls) # integer class
label = f'{id} {names[c]} {conf:.2f}'
annotator.box_label(bboxes, label, color=colors(c, True))
                        if save_txt and names[c] == 'person':  # only write to the txt file when the detected class is a person
# to MOT format
bbox_left = output[0]
bbox_top = output[1]
bbox_w = output[2] - output[0]
bbox_h = output[3] - output[1]
# Write MOT compliant results to file
with open(txt_path, 'a') as f:
f.write(('%g ' * 10 + '\n') % (frame_idx + 1, id, bbox_left,
bbox_top, bbox_w, bbox_h, -1, -1, -1,
-1)) # label format
else:
deepsort.increment_ages()
# Print time (inference-only)
LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
# Stream results
im0 = annotator.result()
if show_vid:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if save_vid:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path += '.mp4'
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer.write(im0)
# Print results
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
if save_txt or save_vid:
        print('Results saved to %s' % (os.getcwd() + os.sep + out))
        if platform.system() == 'Darwin':  # MacOS
os.system('open ' + save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--yolo_weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--deep_sort_weights', type=str, default='deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7', help='ckpt.t7 path')
# file/folder, 0 for webcam
parser.add_argument('--source', type=str, default='0', help='source')
parser.add_argument('--output', type=str, default='inference/output', help='output folder') # output folder
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--show-vid', action='store_true', help='display tracking video results')
parser.add_argument('--save-vid', action='store_true', help='save video tracking results')
parser.add_argument('--save-txt', action='store_true', help='save MOT compliant results to *.txt')
# class 0 is person, 1 is bycicle, 2 is car... 79 is oven
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 16 17')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--evaluate', action='store_true', help='MOT evaluation mode (keep existing output folder)')
parser.add_argument("--config_deepsort", type=str, default="deep_sort_pytorch/configs/deep_sort.yaml")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument('--visualize', action='store_true', help='visualize features')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detection per image')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
with torch.no_grad():
detect(opt)
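# Hedged usage examples (assumed typical invocations; adjust paths and weights):
#   python track.py --source 0 --show-vid                       # webcam
#   python track.py --source video.mp4 --save-vid --save-txt    # file -> MOT txt
#   python track.py --source video.mp4 --classes 0              # persons only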
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/json_delete.py ====
import json
def delete_track_id(json_file, track_id):
    # Read the json file
    with open(json_file, 'r') as f:
        data = json.load(f)
    # Iterate over every element in the json file
    for frame in list(data['frames'].keys()):
        for obj in list(data['frames'][frame]['cv_annotation'].keys()):
            # Check whether "track_id" equals the given integer
            if data['frames'][frame]['cv_annotation'][obj]['track_id'] == str(track_id):
                # If it does, delete this element
                del data['frames'][frame]['cv_annotation'][obj]
    # Write the modified data back to the json file
    with open(json_file, 'w') as f:
        json.dump(data, f, indent=4)
# Usage example
delete_track_id('C:/codefield/code_p/Yolov5_DeepSort_Pytorch-master/Yolov5_DeepSort_Pytorch-master/images/json/a_1.json', 58)
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/txt2json.py ====
import json
def parse_line(line):
"""将一行文本转换为数据字典"""
parts = line.strip().split()
frame_id, track_id, x, y, width, height = map(int, parts[:6])
return {
'frame_id': frame_id,
'track_id': track_id,
        'bbox': [x, y, x + width, y + height]  # convert to corner (x1, y1, x2, y2) format
}
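# Hedged example of the expected MOT-style input line parsed above
# (frame_id, track_id, x, y, width, height, then ignored columns);
# the values are illustrative only:
#   1 3 605 203 33 90 -1 -1 -1 -1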
def convert_to_json(input_file, video_name):
    # Initialize the data structure that stores the final result
    data = {
        'video_name': video_name,
        'frames': {}
    }
    # Initialize a dict that tracks which frames each object appears in
track_frames = {}
with open(input_file, 'r') as file:
for line in file:
parsed = parse_line(line)
frame_id = parsed['frame_id']
track_id = parsed['track_id']
bbox = parsed['bbox']
frame_key = f'frame_{frame_id}'
track_key = f'objType_track_{track_id}'
            # Update track_frames, recording which frame_ids each track_id appears in
            if track_id not in track_frames:
                track_frames[track_id] = []
            track_frames[track_id].append(frame_id)
            # Add the current frame to data if it has not been recorded yet
            if frame_key not in data['frames']:
                data['frames'][frame_key] = {'cv_annotation': {}}
            # Update or create the tracking info for this track_id
            data['frames'][frame_key]['cv_annotation'][track_key] = {
                'object_type': 'pedestrian',  # actual object type unknown; change as needed
'track_id': str(track_id),
'bbox': bbox,
'observed_frames': []
}
    # Fill in observed_frames for every track_id
for frame_data in data['frames'].values():
for track_key, track_info in frame_data['cv_annotation'].items():
track_id = int(track_info['track_id'])
track_info['observed_frames'] = track_frames[track_id]
return json.dumps(data, indent=4)
def save_json_to_file(json_str, output_file):
"""将JSON字符串保存到文件"""
with open(output_file, 'w') as file:
file.write(json_str)
# Specify the input file and video name
input_file = 'C:/codefield/code_p/Yolov5_DeepSort_Pytorch-master/Yolov5_DeepSort_Pytorch-master/inference/output/a.txt'
video_name = 'C:/codefield/code_p/Yolov5_DeepSort_Pytorch-master/Yolov5_DeepSort_Pytorch-master/images/images/a.mp4'
# Specify the output JSON file path
output_json_file = 'C:/codefield/code_p/Yolov5_DeepSort_Pytorch-master/Yolov5_DeepSort_Pytorch-master/images/json/a.json'
# Convert to JSON and save to file
json_output = convert_to_json(input_file, video_name)
save_json_to_file(json_output, output_json_file)
print(f"JSON output has been saved to {output_json_file}")
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/json_logger.py ====
"""
References:
https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f
"""
import json
from os import makedirs
from os.path import exists, join
from datetime import datetime
class JsonMeta(object):
HOURS = 3
MINUTES = 59
SECONDS = 59
PATH_TO_SAVE = 'LOGS'
DEFAULT_FILE_NAME = 'remaining'
class BaseJsonLogger(object):
"""
This is the base class that returns __dict__ of its own
it also returns the dicts of objects in the attributes that are list instances
"""
def dic(self):
# returns dicts of objects
out = {}
for k, v in self.__dict__.items():
if hasattr(v, 'dic'):
out[k] = v.dic()
elif isinstance(v, list):
out[k] = self.list(v)
else:
out[k] = v
return out
@staticmethod
def list(values):
# applies the dic method on items in the list
return [v.dic() if hasattr(v, 'dic') else v for v in values]
class Label(BaseJsonLogger):
"""
For each bounding box there are various categories with confidences. Label class keeps track of that information.
"""
def __init__(self, category: str, confidence: float):
self.category = category
self.confidence = confidence
class Bbox(BaseJsonLogger):
"""
This module stores the information for each frame and use them in JsonParser
Attributes:
labels (list): List of label module.
top (int):
left (int):
width (int):
height (int):
Args:
bbox_id (float):
top (int):
left (int):
width (int):
height (int):
References:
Check Label module for better understanding.
"""
def __init__(self, bbox_id, top, left, width, height):
self.labels = []
self.bbox_id = bbox_id
self.top = top
self.left = left
self.width = width
self.height = height
def add_label(self, category, confidence):
# adds category and confidence only if top_k is not exceeded.
self.labels.append(Label(category, confidence))
def labels_full(self, value):
return len(self.labels) == value
class Frame(BaseJsonLogger):
"""
This module stores the information for each frame and use them in JsonParser
Attributes:
timestamp (float): The elapsed time of captured frame
frame_id (int): The frame number of the captured video
bboxes (list of Bbox objects): Stores the list of bbox objects.
References:
Check Bbox class for better information
Args:
timestamp (float):
frame_id (int):
"""
def __init__(self, frame_id: int, timestamp: float = None):
self.frame_id = frame_id
self.timestamp = timestamp
self.bboxes = []
def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
if bbox_id not in bboxes_ids:
self.bboxes.append(Bbox(bbox_id, top, left, width, height))
else:
raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id))
def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
        bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes}
if bbox_id in bboxes.keys():
res = bboxes.get(bbox_id)
res.add_label(category, confidence)
else:
raise ValueError('the bbox with id: {} does not exists!'.format(bbox_id))
class BboxToJsonLogger(BaseJsonLogger):
"""
    This module is designed to automate the task of logging jsons. An example json is used
to show the contents of json file shortly
Example:
{
"video_details": {
"frame_width": 1920,
"frame_height": 1080,
"frame_rate": 20,
"video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi"
},
"frames": [
{
"frame_id": 329,
"timestamp": 3365.1254
"bboxes": [
{
"labels": [
{
"category": "pedestrian",
"confidence": 0.9
}
],
"bbox_id": 0,
"top": 1257,
"left": 138,
"width": 68,
"height": 109
}
]
}],
Attributes:
frames (dict): It's a dictionary that maps each frame_id to json attributes.
video_details (dict): information about video file.
top_k_labels (int): shows the allowed number of labels
start_time (datetime object): we use it to automate the json output by time.
Args:
top_k_labels (int): shows the allowed number of labels
"""
def __init__(self, top_k_labels: int = 1):
self.frames = {}
        self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None, video_name=None)
self.top_k_labels = top_k_labels
self.start_time = datetime.now()
def set_top_k(self, value):
self.top_k_labels = value
def frame_exists(self, frame_id: int) -> bool:
"""
Args:
frame_id (int):
Returns:
bool: true if frame_id is recognized
"""
return frame_id in self.frames.keys()
def add_frame(self, frame_id: int, timestamp: float = None) -> None:
"""
Args:
frame_id (int):
timestamp (float): opencv captured frame time property
Raises:
ValueError: if frame_id would not exist in class frames attribute
Returns:
None
"""
if not self.frame_exists(frame_id):
self.frames[frame_id] = Frame(frame_id, timestamp)
else:
raise ValueError("Frame id: {} already exists".format(frame_id))
def bbox_exists(self, frame_id: int, bbox_id: int) -> bool:
"""
Args:
frame_id:
bbox_id:
Returns:
bool: if bbox exists in frame bboxes list
"""
bboxes = []
if self.frame_exists(frame_id=frame_id):
bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes]
return bbox_id in bboxes
def find_bbox(self, frame_id: int, bbox_id: int):
"""
Args:
frame_id:
bbox_id:
Returns:
bbox_id (int):
Raises:
ValueError: if bbox_id does not exist in the bbox list of specific frame.
"""
if not self.bbox_exists(frame_id, bbox_id):
raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id))
bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes}
return bboxes.get(bbox_id)
def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None:
"""
Args:
frame_id (int):
bbox_id (int):
top (int):
left (int):
width (int):
height (int):
Returns:
None
Raises:
ValueError: if bbox_id already exist in frame information with frame_id
ValueError: if frame_id does not exist in frames attribute
"""
if self.frame_exists(frame_id):
frame = self.frames[frame_id]
if not self.bbox_exists(frame_id, bbox_id):
frame.add_bbox(bbox_id, top, left, width, height)
else:
raise ValueError(
"frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id))
else:
raise ValueError("frame with frame_id: {} does not exist".format(frame_id))
def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float):
"""
Args:
frame_id:
bbox_id:
category:
confidence: the confidence value returned from yolo detection
Returns:
None
Raises:
ValueError: if labels quota (top_k_labels) exceeds.
"""
bbox = self.find_bbox(frame_id, bbox_id)
if not bbox.labels_full(self.top_k_labels):
bbox.add_label(category, confidence)
else:
raise ValueError("labels in frame_id: {}, bbox_id: {} is fulled".format(frame_id, bbox_id))
def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None,
video_name: str = None):
self.video_details['frame_width'] = frame_width
self.video_details['frame_height'] = frame_height
self.video_details['frame_rate'] = frame_rate
self.video_details['video_name'] = video_name
def output(self):
output = {'video_details': self.video_details}
result = list(self.frames.values())
output['frames'] = [item.dic() for item in result]
return output
def json_output(self, output_name):
"""
Args:
output_name:
Returns:
None
Notes:
It creates the json output with `output_name` name.
"""
if not output_name.endswith('.json'):
output_name += '.json'
        with open(output_name, 'w') as file:
            json.dump(self.output(), file)
def set_start(self):
self.start_time = datetime.now()
def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0,
seconds: int = 60) -> None:
"""
Notes:
Creates folder and then periodically stores the jsons on that address.
Args:
output_dir (str): the directory where output files will be stored
hours (int):
minutes (int):
seconds (int):
Returns:
None
"""
end = datetime.now()
interval = 0
interval += abs(min([hours, JsonMeta.HOURS]) * 3600)
interval += abs(min([minutes, JsonMeta.MINUTES]) * 60)
interval += abs(min([seconds, JsonMeta.SECONDS]))
diff = (end - self.start_time).seconds
if diff > interval:
output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json'
if not exists(output_dir):
makedirs(output_dir)
output = join(output_dir, output_name)
self.json_output(output_name=output)
self.frames = {}
self.start_time = datetime.now()
def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE):
"""
saves as the number of frames quota increases higher.
:param frames_quota:
:param frame_counter:
:param output_dir:
:return:
"""
pass
def flush(self, output_dir):
"""
Notes:
We use this function to output jsons whenever possible.
like the time that we exit the while loop of opencv.
Args:
output_dir:
Returns:
None
"""
filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json'
output = join(output_dir, filename)
self.json_output(output_name=output)
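# Hedged usage sketch (illustrative only; values and file name are placeholders):
# log one frame with a single labelled bbox, then dump everything to json.
def _bbox_logger_demo():
    logger = BboxToJsonLogger(top_k_labels=1)
    logger.add_video_details(frame_width=1920, frame_height=1080, frame_rate=20, video_name='camera1.avi')
    logger.add_frame(frame_id=0, timestamp=0.0)
    logger.add_bbox_to_frame(frame_id=0, bbox_id=0, top=100, left=50, width=60, height=120)
    logger.add_label_to_bbox(frame_id=0, bbox_id=0, category='pedestrian', confidence=0.9)
    logger.json_output('demo_log')  # writes demo_log.json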
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/io.py ====
import os
from typing import Dict
import numpy as np
# from utils.log import get_logger
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
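# Hedged example (illustrative only): writing a single MOT-format row.
#   results = [(1, [(605.0, 203.0, 33.0, 90.0)], [3])]   # (frame_id, tlwhs, track_ids)
#   write_results('demo.txt', results, 'mot')
#   -> "1,3,605.0,203.0,33.0,90.0,-1,-1,-1,-1"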
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
    return tlwhs, ids, scores
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/evaluation.py ====
import os
import numpy as np
import copy
import motmetrics as mm
mm.lap.default_solver = 'lap'
from utils.io import read_results, unzip_objs
class Evaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
if len(iou_distance) > 0:
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_results(filename, self.data_type, is_gt=False)
frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs,
metrics=metrics,
names=names,
generate_overall=True
)
return summary
@staticmethod
def save_summary(summary, filename):
        import pandas as pd
        # the context manager saves and closes the workbook (works across pandas versions)
        with pd.ExcelWriter(filename) as writer:
            summary.to_excel(writer)
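# Hedged usage sketch (illustrative only; paths are placeholders): score one
# tracker output file against MOT ground truth.
def _evaluator_demo():
    evaluator = Evaluator(data_root='MOT16/train', seq_name='MOT16-02', data_type='mot')
    acc = evaluator.eval_file('results/MOT16-02.txt')
    return Evaluator.get_summary([acc], ['MOT16-02'])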
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/draw.py ====
import numpy as np
import cv2
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def compute_color_for_labels(label):
"""
Simple function that adds fixed color depending on the class
"""
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(c) for c in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        # box text and bar
        id = int(identities[i]) if identities is not None else 0
        color = compute_color_for_labels(id)
        label = '{}{:d}'.format("", id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
    return img
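# Hedged example (illustrative only): draw one tracked box on a blank frame.
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   img = draw_boxes(img, bbox=[[50, 60, 150, 260]], identities=[7])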
if __name__ == '__main__':
for i in range(82):
print(compute_color_for_labels(i))
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/asserts.py ====
from os import environ
def assert_in(file, files_to_check):
if file not in files_to_check:
raise AssertionError("{} does not exist in the list".format(str(file)))
return True
def assert_in_env(check_list: list):
for item in check_list:
assert_in(item, environ.keys())
return True
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/log.py ====
import logging
def get_logger(name='root'):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/tools.py ====
from functools import wraps
from time import time
def is_video(ext: str):
"""
Returns true if ext exists in
allowed_exts for video files.
Args:
ext:
Returns:
"""
allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
return any((ext.endswith(x) for x in allowed_exts))
def tik_tok(func):
"""
keep track of time for each process.
Args:
func:
Returns:
"""
@wraps(func)
def _time_it(*args, **kwargs):
start = time()
try:
return func(*args, **kwargs)
finally:
end_ = time()
print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))
return _time_it
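# Hedged usage sketch (illustrative only): decorate any function to print its
# elapsed time and fps on every call.
#   @tik_tok
#   def process_frame(frame):
#       ...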
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/utils/parser.py ====
import os
import yaml
from easydict import EasyDict as edict
class YamlParser(edict):
"""
This is yaml parser based on EasyDict.
"""
def __init__(self, cfg_dict=None, config_file=None):
if cfg_dict is None:
cfg_dict = {}
if config_file is not None:
assert(os.path.isfile(config_file))
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
cfg_dict.update(yaml_)
super(YamlParser, self).__init__(cfg_dict)
def merge_from_file(self, config_file):
with open(config_file, 'r') as fo:
yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
self.update(yaml_)
def merge_from_dict(self, config_dict):
self.update(config_dict)
def get_config(config_file=None):
return YamlParser(config_file=config_file)
if __name__ == "__main__":
cfg = YamlParser(config_file="../configs/yolov3.yaml")
cfg.merge_from_file("../configs/deep_sort.yaml")
import ipdb
ipdb.set_trace()
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/__init__.py ====
from .deep_sort import DeepSort
__all__ = ['DeepSort', 'build_tracker']
def build_tracker(cfg, use_cuda):
    return DeepSort(cfg.DEEPSORT.REID_CKPT,
                    max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                    max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                    use_cuda=use_cuda)
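# Hedged usage sketch (illustrative only; the config path is a placeholder):
#   from deep_sort_pytorch.utils.parser import get_config
#   cfg = get_config('deep_sort_pytorch/configs/deep_sort.yaml')
#   tracker = build_tracker(cfg, use_cuda=True)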
# ==== file: SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep_sort.py ====
import numpy as np
import torch
from .deep.feature_extractor import Extractor
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.detection import Detection
from .sort.tracker import Tracker
__all__ = ['DeepSort']
class DeepSort(object):
def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
self.min_confidence = min_confidence
self.extractor = Extractor(model_path, use_cuda=use_cuda)
max_cosine_distance = max_dist
metric = NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(
metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
def update(self, bbox_xywh, confidences, classes, ori_img, use_yolo_preds=False):
self.height, self.width = ori_img.shape[:2]
# generate detections
features = self._get_features(bbox_xywh, ori_img)
bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(
confidences) if conf > self.min_confidence]
        # prepare boxes and scores for non-maximum suppression
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
# update tracker
self.tracker.predict()
self.tracker.update(detections, classes)
# output bbox identities
outputs = []
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
if use_yolo_preds:
det = track.get_yolo_pred()
x1, y1, x2, y2 = self._tlwh_to_xyxy(det.tlwh)
else:
box = track.to_tlwh()
x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
track_id = track.track_id
class_id = track.class_id
outputs.append(np.array([x1, y1, x2, y2, track_id, class_id], dtype=np.int64))
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
"""
TODO:
Convert bbox from xc_yc_w_h to xtl_ytl_w_h
Thanks [email protected] for reporting this bug!
"""
@staticmethod
def _xywh_to_tlwh(bbox_xywh):
if isinstance(bbox_xywh, np.ndarray):
bbox_tlwh = bbox_xywh.copy()
elif isinstance(bbox_xywh, torch.Tensor):
            bbox_tlwh = bbox_xywh.clone()
        else:
            raise TypeError("bbox_xywh must be an np.ndarray or a torch.Tensor")
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
return bbox_tlwh
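    # Worked example (sketch): a 20x10 box centered at (50, 40) has its
    # top-left corner at (40, 35):
    #   _xywh_to_tlwh(np.array([[50., 40., 20., 10.]])) -> [[40., 35., 20., 10.]]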
def _xywh_to_xyxy(self, bbox_xywh):
x, y, w, h = bbox_xywh
x1 = max(int(x - w / 2), 0)
x2 = min(int(x + w / 2), self.width - 1)
y1 = max(int(y - h / 2), 0)
y2 = min(int(y + h / 2), self.height - 1)
return x1, y1, x2, y2
def _tlwh_to_xyxy(self, bbox_tlwh):
"""
TODO:
Convert bbox from xtl_ytl_w_h to xc_yc_w_h
Thanks [email protected] for reporting this bug!
"""
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
x2 = min(int(x+w), self.width - 1)
y1 = max(int(y), 0)
y2 = min(int(y+h), self.height - 1)
return x1, y1, x2, y2
def increment_ages(self):
self.tracker.increment_ages()
def _xyxy_to_tlwh(self, bbox_xyxy):
x1, y1, x2, y2 = bbox_xyxy
t = x1
l = y1
w = int(x2 - x1)
h = int(y2 - y1)
return t, l, w, h
def _get_features(self, bbox_xywh, ori_img):
im_crops = []
for box in bbox_xywh:
x1, y1, x2, y2 = self._xywh_to_xyxy(box)
im = ori_img[y1:y2, x1:x2]
im_crops.append(im)
if im_crops:
features = self.extractor(im_crops)
else:
features = np.array([])
return features
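# Illustrative per-frame usage (sketch; the detector is assumed to produce
# center-format boxes, confidences and class ids as arrays/tensors):
#
#   outputs = deepsort.update(bbox_xywh, confidences, classes, frame)
#   # each row of `outputs` is [x1, y1, x2, y2, track_id, class_id]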
| 3,990 | Python | .py | 99 | 31.161616 | 143 | 0.573974 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,938 | test.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/test.py | import torch
import torch.backends.cudnn as cudnn
import torchvision
import argparse
import os
from model import Net
parser = argparse.ArgumentParser(description="Train on market1501")
parser.add_argument("--data-dir", default='data', type=str)
parser.add_argument("--no-cuda", action="store_true")
parser.add_argument("--gpu-id", default=0, type=int)
args = parser.parse_args()
# device
device = "cuda:{}".format(
args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
if torch.cuda.is_available() and not args.no_cuda:
cudnn.benchmark = True
# data loader
root = args.data_dir
query_dir = os.path.join(root, "query")
gallery_dir = os.path.join(root, "gallery")
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize((128, 64)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
queryloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(query_dir, transform=transform),
batch_size=64, shuffle=False
)
galleryloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(gallery_dir, transform=transform),
batch_size=64, shuffle=False
)
# net definition
net = Net(reid=True)
assert os.path.isfile(
"./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
checkpoint = torch.load("./checkpoint/ckpt.t7")
net_dict = checkpoint['net_dict']
net.load_state_dict(net_dict, strict=False)
net.eval()
net.to(device)
# compute features
query_features = torch.tensor([]).float()
query_labels = torch.tensor([]).long()
gallery_features = torch.tensor([]).float()
gallery_labels = torch.tensor([]).long()
with torch.no_grad():
for idx, (inputs, labels) in enumerate(queryloader):
inputs = inputs.to(device)
features = net(inputs).cpu()
query_features = torch.cat((query_features, features), dim=0)
query_labels = torch.cat((query_labels, labels))
for idx, (inputs, labels) in enumerate(galleryloader):
inputs = inputs.to(device)
features = net(inputs).cpu()
gallery_features = torch.cat((gallery_features, features), dim=0)
gallery_labels = torch.cat((gallery_labels, labels))
    gallery_labels -= 2  # gallery contains two extra junk/distractor classes; align with query labels
# save features
features = {
"qf": query_features,
"ql": query_labels,
"gf": gallery_features,
"gl": gallery_labels
}
torch.save(features, "features.pth")
| 2,464 | Python | .py | 69 | 32.57971 | 77 | 0.721477 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,939 | train.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/train.py | import argparse
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torchvision
from model import Net
parser = argparse.ArgumentParser(description="Train on market1501")
parser.add_argument("--data-dir", default='data', type=str)
parser.add_argument("--no-cuda", action="store_true")
parser.add_argument("--gpu-id", default=0, type=int)
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--interval", '-i', default=20, type=int)
parser.add_argument('--resume', '-r', action='store_true')
args = parser.parse_args()
# device
device = "cuda:{}".format(
args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
if torch.cuda.is_available() and not args.no_cuda:
cudnn.benchmark = True
# data loading
root = args.data_dir
train_dir = os.path.join(root, "train")
test_dir = os.path.join(root, "test")
transform_train = torchvision.transforms.Compose([
torchvision.transforms.RandomCrop((128, 64), padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transform_test = torchvision.transforms.Compose([
torchvision.transforms.Resize((128, 64)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
trainloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(train_dir, transform=transform_train),
batch_size=64, shuffle=True
)
testloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(test_dir, transform=transform_test),
batch_size=64, shuffle=True
)
num_classes = max(len(trainloader.dataset.classes),
len(testloader.dataset.classes))
# net definition
start_epoch = 0
best_acc = 0.
net = Net(num_classes=num_classes)
if args.resume:
assert os.path.isfile(
"./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
checkpoint = torch.load("./checkpoint/ckpt.t7")
# import ipdb; ipdb.set_trace()
net_dict = checkpoint['net_dict']
net.load_state_dict(net_dict)
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
net.to(device)
# loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
net.parameters(), args.lr, momentum=0.9, weight_decay=5e-4)
# train function for each epoch
def train(epoch):
print("\nEpoch : %d" % (epoch+1))
net.train()
training_loss = 0.
train_loss = 0.
correct = 0
total = 0
interval = args.interval
start = time.time()
for idx, (inputs, labels) in enumerate(trainloader):
# forward
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # accumulating
training_loss += loss.item()
train_loss += loss.item()
correct += outputs.max(dim=1)[1].eq(labels).sum().item()
total += labels.size(0)
# print
if (idx+1) % interval == 0:
end = time.time()
print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
100.*(idx+1)/len(trainloader), end-start, training_loss /
interval, correct, total, 100.*correct/total
))
training_loss = 0.
start = time.time()
return train_loss/len(trainloader), 1. - correct/total
def test(epoch):
global best_acc
net.eval()
test_loss = 0.
correct = 0
total = 0
start = time.time()
with torch.no_grad():
for idx, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
test_loss += loss.item()
correct += outputs.max(dim=1)[1].eq(labels).sum().item()
total += labels.size(0)
print("Testing ...")
end = time.time()
print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
100.*(idx+1)/len(testloader), end-start, test_loss /
len(testloader), correct, total, 100.*correct/total
))
# saving checkpoint
acc = 100.*correct/total
if acc > best_acc:
best_acc = acc
print("Saving parameters to checkpoint/ckpt.t7")
checkpoint = {
'net_dict': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(checkpoint, './checkpoint/ckpt.t7')
return test_loss/len(testloader), 1. - correct/total
# plot figure
x_epoch = []
record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
fig = plt.figure()
ax0 = fig.add_subplot(121, title="loss")
ax1 = fig.add_subplot(122, title="top1err")
def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
global record
record['train_loss'].append(train_loss)
record['train_err'].append(train_err)
record['test_loss'].append(test_loss)
record['test_err'].append(test_err)
x_epoch.append(epoch)
ax0.plot(x_epoch, record['train_loss'], 'bo-', label='train')
ax0.plot(x_epoch, record['test_loss'], 'ro-', label='val')
ax1.plot(x_epoch, record['train_err'], 'bo-', label='train')
ax1.plot(x_epoch, record['test_err'], 'ro-', label='val')
if epoch == 0:
ax0.legend()
ax1.legend()
fig.savefig("train.jpg")
# lr decay
def lr_decay():
global optimizer
for params in optimizer.param_groups:
params['lr'] *= 0.1
lr = params['lr']
print("Learning rate adjusted to {}".format(lr))
def main():
for epoch in range(start_epoch, start_epoch+40):
train_loss, train_err = train(epoch)
test_loss, test_err = test(epoch)
draw_curve(epoch, train_loss, train_err, test_loss, test_err)
if (epoch+1) % 20 == 0:
lr_decay()
if __name__ == '__main__':
main()
| 6,315 | Python | .py | 174 | 30.477011 | 96 | 0.636274 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,940 | original_model.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/original_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, c_in, c_out, is_downsample=False):
super(BasicBlock, self).__init__()
self.is_downsample = is_downsample
if is_downsample:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=2, padding=1, bias=False)
else:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(c_out)
self.relu = nn.ReLU(True)
self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(c_out)
if is_downsample:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
nn.BatchNorm2d(c_out)
)
elif c_in != c_out:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
nn.BatchNorm2d(c_out)
)
self.is_downsample = True
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = self.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if self.is_downsample:
x = self.downsample(x)
return F.relu(x.add(y), True)
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
blocks = []
for i in range(repeat_times):
if i == 0:
blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
else:
blocks += [BasicBlock(c_out, c_out), ]
return nn.Sequential(*blocks)
class Net(nn.Module):
def __init__(self, num_classes=625, reid=False):
super(Net, self).__init__()
# 3 128 64
self.conv = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ELU(inplace=True),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ELU(inplace=True),
nn.MaxPool2d(3, 2, padding=1),
)
# 32 64 32
self.layer1 = make_layers(32, 32, 2, False)
# 32 64 32
self.layer2 = make_layers(32, 64, 2, True)
# 64 32 16
self.layer3 = make_layers(64, 128, 2, True)
# 128 16 8
self.dense = nn.Sequential(
nn.Dropout(p=0.6),
nn.Linear(128*16*8, 128),
nn.BatchNorm1d(128),
nn.ELU(inplace=True)
)
# 256 1 1
self.reid = reid
self.batch_norm = nn.BatchNorm1d(128)
self.classifier = nn.Sequential(
nn.Linear(128, num_classes),
)
def forward(self, x):
x = self.conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = x.view(x.size(0), -1)
if self.reid:
x = self.dense[0](x)
x = self.dense[1](x)
x = x.div(x.norm(p=2, dim=1, keepdim=True))
return x
x = self.dense(x)
# B x 128
# classifier
x = self.classifier(x)
return x
if __name__ == '__main__':
net = Net(reid=True)
x = torch.randn(4, 3, 128, 64)
y = net(x)
import ipdb
ipdb.set_trace()
| 3,339 | Python | .py | 100 | 23.57 | 78 | 0.515799 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,941 | feature_extractor.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/feature_extractor.py | import torch
import torchvision.transforms as transforms
import numpy as np
import cv2
import logging
from .model import Net
class Extractor(object):
def __init__(self, model_path, use_cuda=True):
self.net = Net(reid=True)
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
state_dict = torch.load(model_path, map_location=torch.device(self.device))[
'net_dict']
self.net.load_state_dict(state_dict)
logger = logging.getLogger("root.tracker")
logger.info("Loading weights from {}... Done!".format(model_path))
self.net.to(self.device)
self.size = (64, 128)
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
def _preprocess(self, im_crops):
"""
TODO:
1. to float with scale from 0 to 1
2. resize to (64, 128) as Market1501 dataset did
3. concatenate to a numpy array
3. to torch Tensor
4. normalize
"""
def _resize(im, size):
return cv2.resize(im.astype(np.float32)/255., size)
im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(
0) for im in im_crops], dim=0).float()
return im_batch
def __call__(self, im_crops):
im_batch = self._preprocess(im_crops)
with torch.no_grad():
im_batch = im_batch.to(self.device)
features = self.net(im_batch)
return features.cpu().numpy()
if __name__ == '__main__':
img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
extr = Extractor("checkpoint/ckpt.t7")
feature = extr(img)
print(feature.shape)
| 1,770 | Python | .py | 46 | 30.26087 | 84 | 0.594406 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,942 | model.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, c_in, c_out, is_downsample=False):
super(BasicBlock, self).__init__()
self.is_downsample = is_downsample
if is_downsample:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=2, padding=1, bias=False)
else:
self.conv1 = nn.Conv2d(
c_in, c_out, 3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(c_out)
self.relu = nn.ReLU(True)
self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(c_out)
if is_downsample:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
nn.BatchNorm2d(c_out)
)
elif c_in != c_out:
self.downsample = nn.Sequential(
nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
nn.BatchNorm2d(c_out)
)
self.is_downsample = True
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = self.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if self.is_downsample:
x = self.downsample(x)
return F.relu(x.add(y), True)
def make_layers(c_in, c_out, repeat_times, is_downsample=False):
blocks = []
for i in range(repeat_times):
if i == 0:
blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
else:
blocks += [BasicBlock(c_out, c_out), ]
return nn.Sequential(*blocks)
class Net(nn.Module):
def __init__(self, num_classes=751, reid=False):
super(Net, self).__init__()
# 3 128 64
self.conv = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
# nn.Conv2d(32,32,3,stride=1,padding=1),
# nn.BatchNorm2d(32),
# nn.ReLU(inplace=True),
nn.MaxPool2d(3, 2, padding=1),
)
# 32 64 32
self.layer1 = make_layers(64, 64, 2, False)
# 32 64 32
self.layer2 = make_layers(64, 128, 2, True)
# 64 32 16
self.layer3 = make_layers(128, 256, 2, True)
# 128 16 8
self.layer4 = make_layers(256, 512, 2, True)
# 256 8 4
self.avgpool = nn.AvgPool2d((8, 4), 1)
# 256 1 1
self.reid = reid
self.classifier = nn.Sequential(
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(256, num_classes),
)
def forward(self, x):
x = self.conv(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
# B x 128
if self.reid:
x = x.div(x.norm(p=2, dim=1, keepdim=True))
return x
# classifier
x = self.classifier(x)
return x
if __name__ == '__main__':
net = Net()
x = torch.randn(4, 3, 128, 64)
y = net(x)
import ipdb
ipdb.set_trace()
| 3,316 | Python | .py | 99 | 23.757576 | 78 | 0.516682 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,943 | evaluate.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/deep/evaluate.py | import torch
features = torch.load("features.pth")
qf = features["qf"]
ql = features["ql"]
gf = features["gf"]
gl = features["gl"]
scores = qf.mm(gf.t())
res = scores.topk(5, dim=1)[1][:, 0]  # indices of the top-5 matches; keep only the top-1
top1correct = gl[res].eq(ql).sum().item()
print("Acc top1:{:.3f}".format(top1correct / ql.size(0)))
| 294 | Python | .py | 10 | 28.1 | 57 | 0.658363 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,944 | iou_matching.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/iou_matching.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import linear_assignment
def iou(bbox, candidates):
"""Computer intersection over union.
Parameters
----------
bbox : ndarray
A bounding box in format `(top left x, top left y, width, height)`.
candidates : ndarray
A matrix of candidate bounding boxes (one per row) in the same format
as `bbox`.
Returns
-------
ndarray
The intersection over union in [0, 1] between the `bbox` and each
candidate. A higher score means a larger fraction of the `bbox` is
occluded by the candidate.
"""
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
return area_intersection / (area_bbox + area_candidates - area_intersection)
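# Worked example (sketch): two 10x10 boxes offset by 5 px in x share a 50 px
# intersection, so iou = 50 / (100 + 100 - 50) = 1/3:
#   iou(np.array([0., 0., 10., 10.]), np.array([[5., 0., 10., 10.]])) -> [0.333...]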
def iou_cost(tracks, detections, track_indices=None,
detection_indices=None):
"""An intersection over union distance metric.
Parameters
----------
tracks : List[deep_sort.track.Track]
A list of tracks.
detections : List[deep_sort.detection.Detection]
A list of detections.
track_indices : Optional[List[int]]
A list of indices to tracks that should be matched. Defaults to
all `tracks`.
detection_indices : Optional[List[int]]
A list of indices to detections that should be matched. Defaults
to all `detections`.
Returns
-------
ndarray
Returns a cost matrix of shape
len(track_indices), len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = linear_assignment.INFTY_COST
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray(
[detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou(bbox, candidates)
return cost_matrix
| 2,843 | Python | .py | 68 | 34.911765 | 80 | 0.635277 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,945 | track.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/track.py | # vim: expandtab:ts=4:sw=4
class TrackState:
"""
Enumeration type for the single target track state. Newly created tracks are
classified as `tentative` until enough evidence has been collected. Then,
the track state is changed to `confirmed`. Tracks that are no longer alive
are classified as `deleted` to mark them for removal from the set of active
tracks.
"""
Tentative = 1
Confirmed = 2
Deleted = 3
class Track:
"""
A single target track with state space `(x, y, a, h)` and associated
velocities, where `(x, y)` is the center of the bounding box, `a` is the
aspect ratio and `h` is the height.
Parameters
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
max_age : int
The maximum number of consecutive misses before the track state is
set to `Deleted`.
feature : Optional[ndarray]
Feature vector of the detection this track originates from. If not None,
this feature is added to the `features` cache.
Attributes
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
track_id : int
A unique track identifier.
hits : int
Total number of measurement updates.
age : int
        Total number of frames since first occurrence.
time_since_update : int
Total number of frames since last measurement update.
state : TrackState
The current track state.
features : List[ndarray]
A cache of features. On each measurement update, the associated feature
vector is added to this list.
"""
def __init__(self, mean, covariance, track_id, class_id, n_init, max_age,
feature=None):
self.mean = mean
self.covariance = covariance
self.track_id = track_id
self.class_id = class_id
self.hits = 1
self.age = 1
self.time_since_update = 0
self.yolo_bbox = [0, 0, 0, 0]
self.state = TrackState.Tentative
self.features = []
if feature is not None:
self.features.append(feature)
self._n_init = n_init
self._max_age = max_age
def to_tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
Returns
-------
ndarray
The bounding box.
"""
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
def to_tlbr(self):
"""Get kf estimated current position in bounding box format `(min x, miny, max x,
max y)`.
Returns
-------
ndarray
The predicted kf bounding box.
"""
ret = self.to_tlwh()
ret[2:] = ret[:2] + ret[2:]
return ret
def get_yolo_pred(self):
"""Get yolo prediction`.
Returns
-------
ndarray
The yolo bounding box.
"""
return self.yolo_bbox
def increment_age(self):
self.age += 1
self.time_since_update += 1
def predict(self, kf):
"""Propagate the state distribution to the current time step using a
Kalman filter prediction step.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
"""
self.mean, self.covariance = kf.predict(self.mean, self.covariance)
self.increment_age()
def update(self, kf, detection, class_id):
"""Perform Kalman filter measurement update step and update the feature
cache.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
detection : Detection
The associated detection.
"""
self.yolo_bbox = detection
self.mean, self.covariance = kf.update(
self.mean, self.covariance, detection.to_xyah())
self.features.append(detection.feature)
self.class_id = class_id
self.hits += 1
self.time_since_update = 0
if self.state == TrackState.Tentative and self.hits >= self._n_init:
self.state = TrackState.Confirmed
def mark_missed(self):
"""Mark this track as missed (no association at the current time step).
"""
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self._max_age:
self.state = TrackState.Deleted
def is_tentative(self):
"""Returns True if this track is tentative (unconfirmed).
"""
return self.state == TrackState.Tentative
def is_confirmed(self):
"""Returns True if this track is confirmed."""
return self.state == TrackState.Confirmed
def is_deleted(self):
"""Returns True if this track is dead and should be deleted."""
return self.state == TrackState.Deleted
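# Illustrative lifecycle (sketch): a track starts Tentative, is Confirmed after
# `n_init` consecutive matched frames, and is Deleted once it is missed while
# Tentative or exceeds `max_age` frames without an update:
#
#   track.predict(kf)                       # every frame
#   track.update(kf, detection, class_id)   # when matched to a detection
#   track.mark_missed()                     # when left unmatched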
| 5,410 | Python | .py | 151 | 27.582781 | 89 | 0.611175 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,946 | preprocessing.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/preprocessing.py | # vim: expandtab:ts=4:sw=4
import numpy as np
def non_max_suppression(boxes, max_bbox_overlap, scores=None):
"""Suppress overlapping detections.
Original code from [1]_ has been adapted to include confidence score.
.. [1] http://www.pyimagesearch.com/2015/02/16/
faster-non-maximum-suppression-python/
Examples
--------
>>> boxes = [d.roi for d in detections]
>>> scores = [d.confidence for d in detections]
>>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
>>> detections = [detections[i] for i in indices]
Parameters
----------
boxes : ndarray
Array of ROIs (x, y, width, height).
max_bbox_overlap : float
ROIs that overlap more than this values are suppressed.
scores : Optional[array_like]
Detector confidence score.
Returns
-------
List[int]
Returns indices of detections that have survived non-maxima suppression.
"""
if len(boxes) == 0:
return []
    boxes = boxes.astype(float)  # np.float was removed in NumPy >= 1.24; use the builtin
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2] + boxes[:, 0]
y2 = boxes[:, 3] + boxes[:, 1]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
if scores is not None:
idxs = np.argsort(scores)
else:
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(
idxs, np.concatenate(
([last], np.where(overlap > max_bbox_overlap)[0])))
return pick
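if __name__ == "__main__":
    # Minimal sketch: boxes 0 and 1 overlap heavily, box 2 is separate; with
    # max_bbox_overlap=0.5 the lower-scoring box 1 is suppressed.
    boxes = np.array([[10, 10, 20, 20], [12, 12, 20, 20], [100, 100, 20, 20]])
    scores = np.array([0.9, 0.8, 0.7])
    print(non_max_suppression(boxes, 0.5, scores))  # -> [0, 2]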
| 1,914 | Python | .py | 55 | 27.672727 | 80 | 0.571972 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,947 | nn_matching.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/nn_matching.py | # vim: expandtab:ts=4:sw=4
import numpy as np
def _pdist(a, b):
"""Compute pair-wise squared distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
Returns
-------
ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
contains the squared distance between `a[i]` and `b[j]`.
"""
a, b = np.asarray(a), np.asarray(b)
if len(a) == 0 or len(b) == 0:
return np.zeros((len(a), len(b)))
a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
r2 = np.clip(r2, 0., float(np.inf))
return r2
def _cosine_distance(a, b, data_is_normalized=False):
"""Compute pair-wise cosine distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
data_is_normalized : Optional[bool]
If True, assumes rows in a and b are unit length vectors.
        Otherwise, a and b are explicitly normalized to length 1.
Returns
-------
ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
        contains the cosine distance between `a[i]` and `b[j]`.
"""
if not data_is_normalized:
a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
return 1. - np.dot(a, b.T)
def _nn_euclidean_distance(x, y):
""" Helper function for nearest neighbor distance metric (Euclidean).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest Euclidean distance to a sample in `x`.
"""
distances = _pdist(x, y)
return np.maximum(0.0, distances.min(axis=0))
def _nn_cosine_distance(x, y):
""" Helper function for nearest neighbor distance metric (cosine).
Parameters
----------
x : ndarray
A matrix of N row-vectors (sample points).
y : ndarray
A matrix of M row-vectors (query points).
Returns
-------
ndarray
A vector of length M that contains for each entry in `y` the
smallest cosine distance to a sample in `x`.
"""
distances = _cosine_distance(x, y)
return distances.min(axis=0)
class NearestNeighborDistanceMetric(object):
"""
A nearest neighbor distance metric that, for each target, returns
the closest distance to any sample that has been observed so far.
Parameters
----------
metric : str
Either "euclidean" or "cosine".
matching_threshold: float
The matching threshold. Samples with larger distance are considered an
invalid match.
budget : Optional[int]
If not None, fix samples per class to at most this number. Removes
the oldest samples when the budget is reached.
Attributes
----------
samples : Dict[int -> List[ndarray]]
A dictionary that maps from target identities to the list of samples
that have been observed so far.
"""
def __init__(self, metric, matching_threshold, budget=None):
if metric == "euclidean":
self._metric = _nn_euclidean_distance
elif metric == "cosine":
self._metric = _nn_cosine_distance
else:
raise ValueError(
"Invalid metric; must be either 'euclidean' or 'cosine'")
self.matching_threshold = matching_threshold
self.budget = budget
self.samples = {}
def partial_fit(self, features, targets, active_targets):
"""Update the distance metric with new data.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : ndarray
An integer array of associated target identities.
active_targets : List[int]
A list of targets that are currently present in the scene.
"""
for feature, target in zip(features, targets):
self.samples.setdefault(target, []).append(feature)
if self.budget is not None:
self.samples[target] = self.samples[target][-self.budget:]
self.samples = {k: self.samples[k] for k in active_targets}
def distance(self, features, targets):
"""Compute distance between features and targets.
Parameters
----------
features : ndarray
An NxM matrix of N features of dimensionality M.
targets : List[int]
A list of targets to match the given `features` against.
Returns
-------
ndarray
Returns a cost matrix of shape len(targets), len(features), where
            element (i, j) contains the smallest distance under the chosen metric between
`targets[i]` and `features[j]`.
"""
cost_matrix = np.zeros((len(targets), len(features)))
for i, target in enumerate(targets):
cost_matrix[i, :] = self._metric(self.samples[target], features)
return cost_matrix | 5,447 | Python | .py | 142 | 30.964789 | 78 | 0.616852 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
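if __name__ == "__main__":
    # Minimal sketch: one tracked target (id 7) with a stored unit feature; a
    # matching query gives cosine distance ~0, an orthogonal one gives ~1.
    metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2)
    metric.partial_fit(np.array([[1., 0.]]), np.array([7]), [7])
    print(metric.distance(np.array([[1., 0.], [0., 1.]]), [7]))  # ~[[0., 1.]]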
2,287,948 | kalman_filter.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/kalman_filter.py | # vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
class KalmanFilter(object):
"""
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, a, h, vx, vy, va, vh
contains the bounding box center position (x, y), aspect ratio a, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, a, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, a, h) with center position (x, y),
aspect ratio a, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[0], # the center point x
2 * self._std_weight_position * measurement[1], # the center point y
1 * measurement[2], # the ratio of width/height
2 * self._std_weight_position * measurement[3], # the height
10 * self._std_weight_velocity * measurement[0],
10 * self._std_weight_velocity * measurement[1],
0.1 * measurement[2],
10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous
time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[0],
self._std_weight_position * mean[1],
1 * mean[2],
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[0],
self._std_weight_velocity * mean[1],
0.1 * mean[2],
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(self._motion_mat, mean)
covariance = np.linalg.multi_dot((
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_weight_position * mean[0],
self._std_weight_position * mean[1],
0.1 * mean[2],
self._std_weight_position * mean[3]]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((
self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def update(self, mean, covariance, measurement):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
is the center position, a the aspect ratio, and h the height of the
bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(self, mean, covariance, measurements,
only_position=False):
"""Compute gating distance between state distribution and measurements.
A suitable distance threshold can be obtained from `chi2inv95`. If
`only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Parameters
----------
mean : ndarray
Mean vector over the state distribution (8 dimensional).
covariance : ndarray
Covariance of the state distribution (8x8 dimensional).
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in
format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
only_position : Optional[bool]
If True, distance computation is done with respect to the bounding
box center position only.
Returns
-------
ndarray
Returns an array of length N, where the i-th element contains the
squared Mahalanobis distance between (mean, covariance) and
`measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
cholesky_factor = np.linalg.cholesky(covariance)
d = measurements - mean
z = scipy.linalg.solve_triangular(
cholesky_factor, d.T, lower=True, check_finite=False,
overwrite_b=True)
squared_maha = np.sum(z * z, axis=0)
return squared_maha
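if __name__ == "__main__":
    # Minimal sketch of one predict/update cycle for a box observed in
    # (center x, center y, aspect ratio, height) format.
    kf = KalmanFilter()
    mean, cov = kf.initiate(np.array([50., 40., 0.5, 20.]))
    mean, cov = kf.predict(mean, cov)
    mean, cov = kf.update(mean, cov, np.array([52., 41., 0.5, 20.]))
    print(mean[:4])  # corrected (x, y, a, h) estimate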
| 7,959 | Python | .py | 189 | 32.444444 | 89 | 0.598422 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,949 | tracker.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/tracker.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4])
def __init__(self, metric, max_iou_distance=0.9, max_age=30, n_init=3, _lambda=0):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self._lambda = _lambda
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
def increment_ages(self):
for track in self.tracks:
track.increment_age()
track.mark_missed()
def update(self, detections, classes):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
self.kf, detections[detection_idx], classes[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], classes[detection_idx].item())
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)
def _full_cost_metric(self, tracks, dets, track_indices, detection_indices):
"""
This implements the full lambda-based cost-metric. However, in doing so, it disregards
the possibility to gate the position only which is provided by
linear_assignment.gate_cost_matrix(). Instead, I gate by everything.
Note that the Mahalanobis distance is itself an unnormalised metric. Given the cosine
distance being normalised, we employ a quick and dirty normalisation based on the
threshold: that is, we divide the positional-cost by the gating threshold, thus ensuring
that the valid values range 0-1.
Note also that the authors work with the squared distance. I also sqrt this, so that it
is more intuitive in terms of values.
"""
# Compute First the Position-based Cost Matrix
pos_cost = np.empty([len(track_indices), len(detection_indices)])
msrs = np.asarray([dets[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
pos_cost[row, :] = np.sqrt(
self.kf.gating_distance(
tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False
)
) / self.GATING_THRESHOLD
pos_gate = pos_cost > 1.0
# Now Compute the Appearance-based Cost Matrix
app_cost = self.metric.distance(
np.array([dets[i].feature for i in detection_indices]),
np.array([tracks[i].track_id for i in track_indices]),
)
app_gate = app_cost > self.metric.matching_threshold
# Now combine and threshold
cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost
cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST
# Return Matrix
return cost_matrix
def _match(self, detections):
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = linear_assignment.matching_cascade(
self._full_cost_metric,
linear_assignment.INFTY_COST - 1, # no need for self.metric.matching_threshold here,
self.max_age,
self.tracks,
detections,
confirmed_tracks,
)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if self.tracks[k].time_since_update == 1
]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if self.tracks[k].time_since_update != 1
]
matches_b, unmatched_tracks_b, unmatched_detections = linear_assignment.min_cost_matching(
iou_matching.iou_cost,
self.max_iou_distance,
self.tracks,
detections,
iou_track_candidates,
unmatched_detections,
)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def _initiate_track(self, detection, class_id):
mean, covariance = self.kf.initiate(detection.to_xyah())
self.tracks.append(Track(
mean, covariance, self._next_id, class_id, self.n_init, self.max_age,
detection.feature))
self._next_id += 1
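# Illustrative per-frame loop (sketch; `detections` and `classes` are built
# upstream from detector output):
#
#   tracker.predict()
#   tracker.update(detections, classes)
#   confirmed = [t for t in tracker.tracks if t.is_confirmed()]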
| 7,085 | Python | .py | 152 | 37.388158 | 98 | 0.643239 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,950 | linear_assignment.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/linear_assignment.py | # vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter
INFTY_COST = 1e+5
def min_cost_matching(
distance_metric, max_distance, tracks, detections, track_indices=None,
detection_indices=None):
"""Solve linear assignment problem.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(
tracks, detections, track_indices, detection_indices)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
row_indices, col_indices = linear_sum_assignment(cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
for col, detection_idx in enumerate(detection_indices):
if col not in col_indices:
unmatched_detections.append(detection_idx)
for row, track_idx in enumerate(track_indices):
if row not in row_indices:
unmatched_tracks.append(track_idx)
for row, col in zip(row_indices, col_indices):
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None):
"""Run matching cascade.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
cascade_depth: int
        The cascade depth; should be set to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
for level in range(cascade_depth):
if len(unmatched_detections) == 0: # No detections left
break
track_indices_l = [
k for k in track_indices
if tracks[k].time_since_update == 1 + level
]
if len(track_indices_l) == 0: # Nothing to match at this level
continue
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
def gate_cost_matrix(
kf, cost_matrix, tracks, detections, track_indices, detection_indices,
gated_cost=INFTY_COST, only_position=False):
"""Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
Parameters
----------
kf : The Kalman filter.
cost_matrix : ndarray
The NxM dimensional cost matrix, where N is the number of track indices
and M is the number of detection indices, such that entry (i, j) is the
association cost between `tracks[track_indices[i]]` and
`detections[detection_indices[j]]`.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
gated_cost : Optional[float]
Entries in the cost matrix corresponding to infeasible associations are
        set to this value. Defaults to a very large value.
only_position : Optional[bool]
If True, only the x, y position of the state distribution is considered
during gating. Defaults to False.
Returns
-------
ndarray
Returns the modified cost matrix.
"""
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray(
[detections[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
track = tracks[track_idx]
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
return cost_matrix | 7,801 | Python | .py | 167 | 39.287425 | 93 | 0.678914 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,951 | detection.py | SBY7219_Yolov5_DeepSort_Replicate/deep_sort_pytorch/deep_sort/sort/detection.py | # vim: expandtab:ts=4:sw=4
import numpy as np
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Parameters
----------
tlwh : array_like
Bounding box in format `(x, y, w, h)`.
confidence : float
Detector confidence score.
feature : array_like
A feature vector that describes the object contained in this image.
Attributes
----------
tlwh : ndarray
Bounding box in format `(top left x, top left y, width, height)`.
confidence : ndarray
Detector confidence score.
feature : ndarray | NoneType
A feature vector that describes the object contained in this image.
"""
def __init__(self, tlwh, confidence, feature):
self.tlwh = np.asarray(tlwh, dtype=np.float64)
self.confidence = float(confidence)
self.feature = np.asarray(feature, dtype=np.float32)
def to_tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
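if __name__ == "__main__":
    # Minimal sketch: a 20x10 box whose top-left corner is at (40, 35), with a
    # dummy 128-dimensional appearance feature.
    det = Detection([40., 35., 20., 10.], 0.9, np.zeros(128))
    print(det.to_tlbr())  # [40. 35. 60. 45.]
    print(det.to_xyah())  # [50. 40.  2. 10.]  (aspect ratio = w / h)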
| 1,435 | Python | .py | 41 | 27.95122 | 79 | 0.602453 | SBY7219/Yolov5_DeepSort_Replicate | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,952 | hubconf.py | JSchlensok_VespaG/hubconf.py | from vespag.utils import DEFAULT_MODEL_PARAMETERS, load_model
from vespag.utils.type_hinting import EmbeddingType
dependencies = ["torch"]
def v2(embedding_type: EmbeddingType):
    # copy the defaults so the shared module-level dict is not mutated
    params = {**DEFAULT_MODEL_PARAMETERS, "embedding_type": embedding_type}
return load_model(**params)
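# Illustrative torch.hub usage (sketch; the repo path is taken from this
# project's GitHub location, and passing embedding_type as a plain string is
# assumed to be accepted downstream):
#
#   import torch
#   model = torch.hub.load("JSchlensok/VespaG", "v2", embedding_type="esm2")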
| 296 | Python | .py | 7 | 39.285714 | 61 | 0.787456 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,953 | __main__.py | JSchlensok_VespaG/vespag/__main__.py | from pathlib import Path
from typing import Annotated, Optional
import typer
from .data.embeddings import generate_embeddings
from .eval import eval
from .predict import generate_predictions
from .training.train import train as run_training
from .utils.type_hinting import EmbeddingType
app = typer.Typer()
app.add_typer(eval.app, name="eval")
@app.command()
def predict(
fasta_file: Annotated[
Path,
typer.Option(
"-i",
"--input",
help="Path to FASTA-formatted file containing protein sequence(s)",
),
],
output_path: Annotated[
Path,
typer.Option(
"-o",
"--output",
help="Path for saving created CSV and/or H5 files. Defaults to ./output",
),
] = None,
embedding_file: Annotated[
Path,
typer.Option(
"-e",
"--embeddings",
help="Path to pre-generated input embeddings. Embeddings will be generated from scratch if no path is provided.",
),
] = None,
mutation_file: Annotated[
Path,
typer.Option(
"--mutation-file", help="CSV file specifying specific mutations to score"
),
] = None,
id_map_file: Annotated[
Path,
typer.Option(
"--id-map",
help="CSV file mapping embedding IDs to FASTA IDs if they're different",
),
] = None,
single_csv: Annotated[
Optional[bool],
typer.Option(
"--single-csv/--multi-csv",
help="Whether to return one CSV file for all proteins instead of a single file for each protein",
),
] = False,
no_csv: Annotated[
bool,
typer.Option(
"--no-csv/--csv", help="Whether no CSV output should be produced at all"
),
] = False,
h5_output: Annotated[
bool,
typer.Option(
"--h5-output/--no-h5-output",
help="Whether a file containing predictions in HDF5 format should be created",
),
] = False,
zero_based_mutations: Annotated[
bool,
typer.Option(
"--zero-idx/--one-idx",
help="Whether to enumerate the sequence starting at 0",
),
] = False,
transform_scores: Annotated[
bool,
typer.Option(
"--transform/--dont-transform",
help="Whether to transform scores to same distribution as GEMME scores",
),
] = True,
normalize_scores: Annotated[
bool,
typer.Option(
"--normalize/--dont-normalize",
help="Whether to transform scores to [0, 1] range",
),
] = True,
embedding_type: Annotated[
EmbeddingType,
typer.Option(
"--embedding-type", help="Type of pLM used for generating embeddings"
),
] = "esm2",
) -> None:
generate_predictions(
fasta_file,
output_path,
embedding_file,
mutation_file,
id_map_file,
single_csv,
no_csv,
h5_output,
zero_based_mutations,
normalize_scores,
embedding_type,
)
@app.command()
def embed(
input_fasta_file: Annotated[Path, typer.Argument(help="Path of input FASTA file")],
output_h5_file: Annotated[
Path, typer.Argument(help="Path for saving HDF5 file with computed embeddings")
],
cache_dir: Annotated[
Path,
typer.Option(
"-c", "--cache-dir", help="Custom path to download model checkpoints to"
),
],
embedding_type: Annotated[
EmbeddingType,
typer.Option(
"-e",
"--embedding-type",
case_sensitive=False,
help="Type of embeddings to generate",
),
] = EmbeddingType.esm2,
pretrained_path: Annotated[
str,
typer.Option("--pretrained-path", help="Path or URL of pretrained transformer"),
] = None,
):
generate_embeddings(
input_fasta_file, output_h5_file, cache_dir, embedding_type, pretrained_path
)
@app.command()
def train(
model_config_key: Annotated[str, typer.Option("--model")],
datasets: Annotated[list[str], typer.Option("--dataset")],
output_dir: Annotated[Path, typer.Option("--output-dir", "-o")],
embedding_type: Annotated[str, typer.Option("--embedding-type", "-e")],
compute_full_train_loss: Annotated[bool, typer.Option("--full-train-loss")] = False,
sampling_strategy: Annotated[str, typer.Option("--sampling-strategy")] = "basic",
wandb_config: Annotated[tuple[str, str], typer.Option("--wandb")] = None,
limit_cache: Annotated[bool, typer.Option("--limit-cache")] = False,
use_full_dataset: Annotated[bool, typer.Option("--use-full-dataset")] = False,
):
run_training(
model_config_key,
datasets,
output_dir,
embedding_type,
compute_full_train_loss,
sampling_strategy,
wandb_config,
limit_cache,
use_full_dataset,
)
if __name__ == "__main__":
app()
| 5,114 | Python | .py | 165 | 23.4 | 125 | 0.592263 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,954 | embeddings.py | JSchlensok_VespaG/vespag/data/embeddings.py | import re
from pathlib import Path
from typing import Annotated, Union
import h5py
import rich.progress as progress
import torch
import typer
from Bio import SeqIO
from transformers import AutoModel, AutoTokenizer, T5EncoderModel, T5Tokenizer
from vespag.utils import get_device
from vespag.utils.type_hinting import EmbeddingType
model_names = {
"esm2": "facebook/esm2_t36_3B_UR50D",
"prott5": "Rostlab/prot_t5_xl_uniref50",
}
# TODO implement generation of overlapping embeddings
class Embedder:
def __init__(
self, pretrained_path: Union[Path, str], cache_dir: Path = None
) -> None:
device = get_device()
self.device = device
if "t5" in pretrained_path:
tokenizer_class = T5Tokenizer
encoder_class = T5EncoderModel
else:
tokenizer_class = AutoTokenizer
encoder_class = AutoModel
kwargs = {}
if cache_dir:
kwargs["cache_dir"] = cache_dir
self.tokenizer = tokenizer_class.from_pretrained(
pretrained_path, **kwargs, do_lower_case=False
)
self.encoder = encoder_class.from_pretrained(pretrained_path, **kwargs).to(
device
)
self.encoder = (
self.encoder.half()
if device == torch.device("cuda:0")
else self.encoder.float()
)
@staticmethod
def batch(sequences: dict[str, str], max_batch_length: int) -> list[dict[str, str]]:
batches = []
current_batch = {}
        for id, sequence in sequences.items():
if (
sum(map(len, current_batch.values()))
+ min(len(sequence), max_batch_length)
> max_batch_length
):
batches.append(current_batch)
current_batch = {id: sequence}
else:
current_batch[id] = sequence
batches.append(current_batch)
return batches
def embed(
self, sequences: dict[str, str], max_batch_length: int = 4096
) -> dict[str, torch.tensor]:
batches = self.batch(sequences, max_batch_length)
with progress.Progress(
*progress.Progress.get_default_columns(), progress.TimeElapsedColumn()
) as pbar, torch.no_grad():
embedding_progress = pbar.add_task(
"Computing embeddings", total=sum(map(len, sequences.values()))
)
embeddings = {}
for batch in batches:
input_sequences = [
" ".join(list(re.sub(r"[UZOB]", "X", seq)))
for seq in batch.values()
]
input_tokens = self.tokenizer.batch_encode_plus(
input_sequences,
add_special_tokens=True,
padding="longest",
return_tensors="pt",
max_length=max_batch_length,
).to(self.device)
raw_embeddings = self.encoder(**input_tokens)
embeddings.update(
{
id: raw_embeddings.last_hidden_state[i, 1 : len(seq) + 1]
.detach()
.float()
.cpu()
for i, (id, seq) in enumerate(batch.items())
}
)
pbar.advance(embedding_progress, sum(map(len, batch.values())))
return embeddings
@staticmethod
def save_embeddings(embeddings: dict[str, torch.tensor], h5_path: Path) -> None:
h5_path.parent.mkdir(exist_ok=True, parents=True)
with h5py.File(h5_path, "w") as f:
for id, emb in embeddings.items():
f.create_dataset(id, data=emb.numpy())
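# Minimal usage sketch (hypothetical sequence and paths), mirroring what
# generate_embeddings below does end to end:
#
#     embedder = Embedder(model_names["esm2"], cache_dir=Path(".plm_cache"))
#     embeddings = embedder.embed({"P12345": "MKTAYIAKQR"})
#     Embedder.save_embeddings(embeddings, Path("embeddings.h5"))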
def generate_embeddings(
input_fasta_file: Annotated[Path, typer.Argument(help="Path of input FASTA file")],
output_h5_file: Annotated[
Path, typer.Argument(help="Path for saving HDF5 file with computed embeddings")
],
cache_dir: Annotated[
Path,
typer.Option(
"-c", "--cache-dir", help="Custom path to download model checkpoints to"
),
],
embedding_type: Annotated[
EmbeddingType,
typer.Option(
"-e",
"--embedding-type",
case_sensitive=False,
help="Type of embeddings to generate",
),
] = EmbeddingType.esm2,
pretrained_path: Annotated[
str,
typer.Option("--pretrained-path", help="Path or URL of pretrained transformer"),
] = None,
):
if embedding_type and not pretrained_path:
pretrained_path = model_names[embedding_type]
sequences = {rec.id: str(rec.seq) for rec in SeqIO.parse(input_fasta_file, "fasta")}
embedder = Embedder(pretrained_path, cache_dir)
embeddings = embedder.embed(sequences)
Embedder.save_embeddings(embeddings, output_h5_file)
if __name__ == "__main__":
typer.run(generate_embeddings)
| 5,035 | Python | .py | 132 | 27.69697 | 88 | 0.575143 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,955 | gemme.py | JSchlensok_VespaG/vespag/data/gemme.py | from pathlib import Path
from typing import Annotated
import h5py
import pandas as pd
import typer
from rich import progress
app = typer.Typer()
def store_gemme_as_h5(gemme_folder: Path, output_file: Path) -> None:
with h5py.File(output_file, "w") as hdf:
for file in progress.track(
list(gemme_folder.glob("*_normPred_evolCombi.txt")),
description=f"Loading GEMME score files from {gemme_folder}",
):
protein_id = file.stem.replace("_normPred_evolCombi", "")
data = pd.read_csv(file, sep=" ").transpose().to_numpy().astype("double")
hdf.create_dataset(name=protein_id, data=data)
@app.command()
def load(
gemme_folder: Annotated[
Path, typer.Argument(help="Directory with raw GEMME predictions as txt files")
],
output_file: Annotated[Path, typer.Argument(help="Path of output H5 file")],
):
store_gemme_as_h5(gemme_folder, output_file)
@app.command()
def foo():
    # Placeholder command: Typer exposes an app with a single command as a
    # bare CLI without subcommand syntax, so a second command keeps `load`
    # addressable by name.
print("bar")
if __name__ == "__main__":
app()
| 1,115 | Python | .py | 30 | 31.966667 | 86 | 0.672558 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,956 | eval.py | JSchlensok_VespaG/vespag/eval/eval.py | import warnings
from pathlib import Path
from typing import Annotated
import polars as pl
import typer
import yaml
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from tqdm.rich import tqdm
from vespag.predict import generate_predictions
from vespag.utils import download, setup_logger, unzip
from vespag.utils.proteingym import PROTEINGYM_CHANGED_FILENAMES
app = typer.Typer()
@app.command()
def proteingym(
output_path: Annotated[
Path,
typer.Option(
"-o",
"--output",
help="Output path. Defaults to ./output/proteingym217 or ./output/proteingym87",
),
] = None,
dms_reference_file: Annotated[
Path, typer.Option("--reference-file", help="Path of DMS reference file")
] = None,
dms_directory: Annotated[
Path,
typer.Option(
"--dms-directory", help="Path of directory containing per-DMS score files"
),
] = None,
embedding_file: Annotated[
Path,
typer.Option(
"-e",
"--embeddings",
help="Path to pre-generated input embeddings. Embeddings will be generated from scratch if no path is provided",
),
] = None,
id_map_file: Annotated[
Path,
typer.Option(
"--id-map",
help="CSV file mapping embedding IDs to FASTA IDs if they're different",
),
] = None,
transform_scores: Annotated[
bool,
typer.Option(
"--transform/--dont-transform",
help="Whether to transform scores to same distribution as GEMME scores",
),
] = True,
normalize_scores: Annotated[
bool,
typer.Option(
"--normalize/--dont-normalize",
help="Whether to transform scores to [0, 1] range",
),
] = True,
legacy_mode: Annotated[
bool,
typer.Option(
"--v1/--v2",
help="Whether to evaluate on the first version (87 DMS) of ProteinGym",
),
] = False,
):
logger = setup_logger()
warnings.filterwarnings("ignore", message="rich is experimental/alpha")
benchmark_name, benchmark_version = (
("proteingym217", "v2") if not legacy_mode else ("proteingym87", "v1")
)
config = yaml.safe_load((Path.cwd() / "params.yaml").open("r"))["eval"][
"proteingym"
]
if not dms_reference_file:
dms_reference_file = Path.cwd() / f"data/test/{benchmark_name}/reference.csv"
download(
config["reference_file"][benchmark_version],
dms_reference_file,
"Downloading reference file",
remove_bar=True,
)
if not dms_directory:
dms_directory = Path.cwd() / f"data/test/{benchmark_name}/raw_dms_files/"
zip_file = dms_directory / "DMS.zip"
download(
config["dms_files"], zip_file, "Downloading DMS files", remove_bar=True
)
unzip(zip_file, dms_directory, "Extracting DMS files", remove_bar=True)
zip_file.unlink()
if not output_path:
output_path = Path.cwd() / f"output/{benchmark_name}"
output_path.mkdir(parents=True, exist_ok=True)
sequence_file = output_path / "sequences.fasta"
reference_df = pl.read_csv(dms_reference_file)
if legacy_mode:
new_filenames = pl.from_records(
[
{"DMS_id": key, "DMS_filename": val}
for key, val in PROTEINGYM_CHANGED_FILENAMES.items()
]
)
reference_df = (
reference_df.join(new_filenames, on="DMS_id", how="left")
.with_columns(
pl.col("DMS_filename_right").fill_null(pl.col("DMS_filename"))
)
.drop("DMS_filename")
.rename({"DMS_filename_right": "DMS_filename"})
)
sequences = [
SeqRecord(id=row["DMS_id"], seq=Seq(row["target_seq"]))
for row in reference_df.iter_rows(named=True)
]
logger.info(f"Writing {len(sequences)} sequences to {sequence_file}")
SeqIO.write(sequences, sequence_file, "fasta")
logger.info(f"Parsing mutation files from {dms_directory}")
mutation_file = output_path / "mutations.txt"
dms_files = {
row["DMS_id"]: pl.read_csv(dms_directory / row["DMS_filename"])
for row in reference_df.iter_rows(named=True)
}
pl.concat(
[
df.with_columns(pl.lit(dms_id).alias("DMS_id")).select(["DMS_id", "mutant"])
for dms_id, df in dms_files.items()
]
).write_csv(mutation_file)
logger.info("Generating predictions")
generate_predictions(
fasta_file=sequence_file,
output_path=output_path,
embedding_file=embedding_file,
mutation_file=mutation_file,
id_map_file=id_map_file,
single_csv=True,
transform_scores=transform_scores,
normalize_scores=normalize_scores,
)
mutation_file.unlink()
sequence_file.unlink()
prediction_file = output_path / "vespag_scores_all.csv"
all_preds = pl.read_csv(prediction_file)
logger.info(
"Computing Spearman correlations between experimental and predicted scores"
)
records = []
for dms_id, dms_df in tqdm(list(dms_files.items()), leave=False):
dms_df = dms_df.join(
all_preds.filter(pl.col("Protein") == dms_id),
left_on="mutant",
right_on="Mutation",
)
spearman = dms_df.select(
pl.corr("DMS_score", "VespaG", method="spearman")
).item()
records.append({"DMS_id": dms_id, "spearman": spearman})
result_csv_path = output_path / "VespaG_Spearman_per_DMS.csv"
result_df = pl.from_records(records)
logger.info(f"Writing results to {result_csv_path}")
logger.info(f"Mean Spearman r: {result_df['spearman'].mean():.5f}")
result_df.write_csv(result_csv_path)
| 5,935 | Python | .py | 166 | 27.86747 | 124 | 0.610184 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,957 | cnn.py | JSchlensok_VespaG/vespag/models/cnn.py | import torch
from jaxtyping import Float
from .utils import construct_fnn
"""
batch_size x L x 1536
- transform ->
batch_size x 1536 x L x 1
"""
class MinimalCNN(torch.nn.Module):
"""
1D convolution followed by two dense layers, akin to biotrainer's offering
Attributes:
        input_dim: Size of the input vectors (e.g. 1024 for ProtT5 embeddings, 2560 for ESM-2 embeddings). Default: 2560
output_dim: Size of the output vector (e.g. 20 for GEMME scores). Default: 20
n_channels: Number of channels. Default: 256
kernel_size: Size of the convolving kernel, Default: 7
padding: Amount of padding applied to the input. Default: 3
fnn_hidden_layers: Dimensions of two dense hidden layers. Default: [256, 64]
activation_function: Activation function to use for the hidden layers. Default: LeakyReLU
output_activation_function: Activation function to use for the output layer, e.g. None for linear regression,
Sigmoid for logistic regression. Default: None
cnn_dropout_rate: Dropout rate to apply after every layer, if desired. Default: None
fnn_dropout_rate: Dropout rate to apply after every layer, if desired. Default: None
Examples:
gemme_esm2_cnn = MinimalCNN()
"""
def __init__(
self,
input_dim: int = 2560,
output_dim: int = 20,
n_channels: int = 256,
kernel_size=7,
padding=3,
fnn_hidden_layers: list[int] = [256, 64],
activation_function: torch.nn.Module = torch.nn.LeakyReLU,
output_activation_function: torch.nn.Module = None,
cnn_dropout_rate: float = None,
fnn_dropout_rate: float = None,
):
super(MinimalCNN, self).__init__()
conv_layers = [
torch.nn.Conv1d(
input_dim, n_channels, kernel_size=kernel_size, padding=padding
),
activation_function(),
]
if cnn_dropout_rate:
conv_layers.append(torch.nn.Dropout(cnn_dropout_rate))
self.conv = torch.nn.Sequential(*conv_layers)
self.fnn = construct_fnn(
fnn_hidden_layers,
n_channels,
output_dim,
activation_function,
output_activation_function,
fnn_dropout_rate,
)
def forward(
self, X: Float[torch.Tensor, "batch_size length input_dim"]
) -> Float[torch.Tensor, "batch_size length output_dim"]:
X = X.movedim(-1, -2)
X = self.conv(X)
X = X.movedim(-1, -2)
X = self.fnn(X)
return X.squeeze(-1)
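# Shape sketch with the default hyperparameters (hypothetical batch of 2
# sequences of length 100): embeddings of shape (2, 100, 2560) enter the
# Conv1d as (2, 2560, 100), leave it as (2, 256, 100), are transposed back
# to (2, 100, 256), and the dense head maps them to per-residue scores:
#
#     x = torch.randn(2, 100, 2560)
#     assert MinimalCNN()(x).shape == (2, 100, 20)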
class CombinedCNN(torch.nn.Module):
# TODO parametrize (CNN parameters, FNN parameters, shared FNN parameters)
"""
Parallel FNN and CNN whose outputs are concatenated and again fed through dense layers
"""
def __init__(
self,
input_dim: int = 1024,
output_dim: int = 20,
n_channels: int = 256,
kernel_size=7,
padding=3,
cnn_hidden_layers: list[int] = [64],
fnn_hidden_layers: list[int] = [256, 64],
shared_hidden_layers: list[int] = [64],
activation_function: torch.nn.Module = torch.nn.LeakyReLU,
output_activation_function: torch.nn.Module = None,
shared_dropout_rate: float = None,
cnn_dropout_rate: float = None,
fnn_dropout_rate: float = None,
):
super(CombinedCNN, self).__init__()
self.conv = MinimalCNN(
input_dim=input_dim,
output_dim=cnn_hidden_layers[-1],
n_channels=n_channels,
kernel_size=kernel_size,
padding=padding,
fnn_hidden_layers=cnn_hidden_layers[:-1],
activation_function=activation_function,
output_activation_function=activation_function,
cnn_dropout_rate=cnn_dropout_rate,
fnn_dropout_rate=fnn_dropout_rate,
)
self.fnn = construct_fnn(
hidden_layer_sizes=fnn_hidden_layers[:-1],
input_dim=input_dim,
output_dim=fnn_hidden_layers[-1],
activation_function=activation_function,
output_activation_function=activation_function,
dropout_rate=fnn_dropout_rate,
)
self.combined = construct_fnn(
hidden_layer_sizes=shared_hidden_layers,
input_dim=cnn_hidden_layers[-1] + fnn_hidden_layers[-1],
output_dim=output_dim,
activation_function=activation_function,
output_activation_function=output_activation_function,
dropout_rate=shared_dropout_rate,
)
def forward(self, X):
X_combined = torch.cat([self.conv(X), self.fnn(X)], dim=-1)
pred = self.combined(X_combined)
return pred.squeeze(-1)
| 4,808 | Python | .py | 119 | 31.327731 | 120 | 0.619119 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,958 | utils.py | JSchlensok_VespaG/vespag/models/utils.py | from copy import deepcopy
import torch
def construct_fnn(
hidden_layer_sizes: list[int],
input_dim: int = 1024,
output_dim: int = 20,
activation_function: torch.nn.Module = torch.nn.LeakyReLU,
output_activation_function: torch.nn.Module = None,
dropout_rate: float = None,
):
layer_sizes = deepcopy(hidden_layer_sizes)
layer_sizes.insert(0, input_dim)
layer_sizes.append(output_dim)
layers = []
for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):
layers.append(torch.nn.Linear(in_size, out_size))
if dropout_rate:
layers.append(torch.nn.Dropout(dropout_rate))
layers.append(activation_function())
# remove last activation function
layers = layers[:-1]
# remove last dropout if applicable
if dropout_rate:
layers = layers[:-1]
if output_activation_function:
layers.append(output_activation_function())
return torch.nn.Sequential(*layers)
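# Example of the produced stack (sizes are illustrative):
# construct_fnn([256], input_dim=2560, output_dim=20, dropout_rate=0.2)
# yields Linear(2560, 256) -> Dropout(0.2) -> LeakyReLU -> Linear(256, 20),
# i.e. activation and dropout after the final output layer are stripped.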
class MeanModel(torch.nn.Module):
def __init__(self, *models: torch.nn.Module):
super(MeanModel, self).__init__()
        # Register sub-models as a ModuleList so device moves, eval() and
        # state_dict() propagate to the ensemble members
        self.models = torch.nn.ModuleList(models)
def forward(self, x):
return sum([model(x) for model in self.models]) / len(self.models)
| 1,238 | Python | .py | 33 | 31.575758 | 74 | 0.675879 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,959 | __init__.py | JSchlensok_VespaG/vespag/models/__init__.py | from .cnn import CombinedCNN, MinimalCNN
from .fnn import FNN
__all__ = ["FNN", "MinimalCNN", "CombinedCNN"]
| 110 | Python | .py | 3 | 35.333333 | 46 | 0.726415 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,960 | fnn.py | JSchlensok_VespaG/vespag/models/fnn.py | import torch
from jaxtyping import Float
from .utils import construct_fnn
class FNN(torch.nn.Module):
"""
Fully-connected neural network with arbitrary hidden layers and activation functions
Attributes:
input_dim: Size of the input vectors (e.g. 1024 for ProtT5 embeddings, 2560 for ESM2 embeddings). Default: 2560
output_dim: Size of the output vector (e.g. 1 for conservation prediction, 20 for GEMME scores). Default: 20
activation_function: Activation function to use for the hidden layers. Default: LeakyReLU
output_activation_function: Activation function to use for the output layer, e.g. None for linear regression,
Sigmoid for logistic regression. Default: None
dropout_rate: Dropout rate to apply after every layer, if desired. Default: None
Examples:
linear_regression = FNN([], 2560, 20, None, None)
vespag = FNN([256], 2560, 20, dropout_rate=0.2)
"""
def __init__(
self,
hidden_layer_sizes: list[int],
input_dim: int = 2560,
output_dim: int = 20,
activation_function: torch.nn.Module = torch.nn.LeakyReLU,
output_activation_function: torch.nn.Module = None,
dropout_rate: float = None,
):
super(FNN, self).__init__()
self.net = construct_fnn(
hidden_layer_sizes,
input_dim,
output_dim,
activation_function,
output_activation_function,
dropout_rate,
)
for layer in self.net:
if isinstance(layer, torch.nn.Linear):
torch.nn.init.kaiming_normal_(layer.weight.data, a=1e-2)
torch.nn.init.zeros_(layer.bias.data)
def forward(
self, X: Float[torch.Tensor, "batch_size length input_dim"]
) -> Float[torch.Tensor, "batch_size length output_dim"]:
return self.net(X).squeeze(-1)
| 1,914 | Python | .py | 43 | 35.813953 | 119 | 0.64485 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,961 | predict.py | JSchlensok_VespaG/vespag/predict/predict.py | import csv
import os
import warnings
from pathlib import Path
import h5py
import numpy as np
import rich.progress as progress
import torch
from Bio import SeqIO
from tqdm.rich import tqdm
from vespag.data.embeddings import Embedder
from vespag.utils import (
AMINO_ACIDS,
DEFAULT_MODEL_PARAMETERS,
SAV,
compute_mutation_score,
get_device,
load_model,
mask_non_mutations,
read_mutation_file,
setup_logger,
)
from vespag.utils.type_hinting import *
def generate_predictions(
fasta_file: Path,
output_path: Path,
embedding_file: Path = None,
mutation_file: Path = None,
id_map_file: Path = None,
single_csv: bool = False,
no_csv: bool = False,
h5_output: bool = False,
zero_based_mutations: bool = False,
transform_scores: bool = True,
normalize_scores: bool = True,
embedding_type: EmbeddingType = "esm2",
) -> None:
logger = setup_logger()
warnings.filterwarnings("ignore", message="rich is experimental/alpha")
output_path = output_path or Path.cwd() / "output"
if not output_path.exists():
logger.info(f"Creating output directory {output_path}")
output_path.mkdir(parents=True)
device = get_device()
    # Copy so the module-level defaults aren't mutated between calls
    params = DEFAULT_MODEL_PARAMETERS.copy()
    params["embedding_type"] = embedding_type
model = load_model(**params).eval().to(device, dtype=torch.float)
sequences = {rec.id: str(rec.seq) for rec in SeqIO.parse(fasta_file, "fasta")}
if embedding_file:
logger.info(f"Loading pre-computed embeddings from {embedding_file}")
embeddings = {
id: torch.from_numpy(np.array(emb[()], dtype=np.float32))
for id, emb in tqdm(
h5py.File(embedding_file).items(),
desc="Loading embeddings",
leave=False,
)
}
if id_map_file:
id_map = {row[0]: row[1] for row in csv.reader(id_map_file.open("r"))}
for from_id, to_id in id_map.items():
embeddings[to_id] = embeddings[from_id]
del embeddings[from_id]
else:
logger.info("Generating ESM2 embeddings")
if "HF_HOME" in os.environ:
plm_cache_dir = os.environ["HF_HOME"]
else:
plm_cache_dir = Path.cwd() / ".esm2_cache"
plm_cache_dir.mkdir(exist_ok=True)
embedder = Embedder("facebook/esm2_t36_3B_UR50D", plm_cache_dir)
embeddings = embedder.embed(sequences)
embedding_output_path = output_path / "esm2_embeddings.h5"
logger.info(
f"Saving generated ESM2 embeddings to {embedding_output_path} for re-use"
)
Embedder.save_embeddings(embeddings, embedding_output_path)
if mutation_file:
logger.info("Parsing mutational landscape")
mutations_per_protein = read_mutation_file(
mutation_file, one_indexed=not zero_based_mutations
)
else:
logger.info("Generating mutational landscape")
mutations_per_protein = {
protein_id: [
SAV(i, wildtype_aa, other_aa, not zero_based_mutations)
for i, wildtype_aa in enumerate(sequence)
for other_aa in AMINO_ACIDS
if other_aa != wildtype_aa
]
for protein_id, sequence in tqdm(sequences.items(), leave=False)
}
logger.info("Generating predictions")
vespag_scores = {}
scores_per_protein = {}
with progress.Progress(
progress.TextColumn("[progress.description]Generating predictions"),
progress.BarColumn(),
progress.TaskProgressColumn(),
progress.TimeElapsedColumn(),
progress.TextColumn("Current protein: {task.description}"),
) as pbar, torch.no_grad():
overall_progress = pbar.add_task(
"Generating predictions",
total=sum([len(mutations) for mutations in mutations_per_protein.values()]),
)
for id, sequence in sequences.items():
pbar.update(overall_progress, description=id)
embedding = embeddings[id].to(device)
y = model(embedding)
y = mask_non_mutations(y, sequence)
scores_per_protein[id] = {
mutation: compute_mutation_score(
y,
mutation,
pbar=pbar,
progress_id=overall_progress,
transform=transform_scores,
normalize=normalize_scores,
)
for mutation in mutations_per_protein[id]
}
if h5_output:
                vespag_scores[id] = y.detach().cpu().numpy()  # move off GPU before numpy conversion
pbar.remove_task(overall_progress)
if h5_output:
h5_output_path = output_path / "vespag_scores_all.h5"
logger.info(f"Serializing predictions to {h5_output_path}")
with h5py.File(h5_output_path, "w") as f:
for id, vespag_prediction in tqdm(vespag_scores.items(), leave=False):
f.create_dataset(id, data=vespag_prediction)
if not no_csv:
logger.info("Generating CSV output")
if not single_csv:
for protein_id, mutations in tqdm(scores_per_protein.items(), leave=False):
output_file = output_path / (protein_id + ".csv")
with output_file.open("w+") as f:
f.write("Mutation,VespaG\n")
f.writelines(
[f"{str(sav)},{score}\n" for sav, score in mutations.items()]
)
else:
output_file = output_path / "vespag_scores_all.csv"
with output_file.open("w+") as f:
f.write("Protein,Mutation,VespaG\n")
f.writelines(
[
line
for line in tqdm(
[
f"{protein_id},{str(sav)},{score}\n"
for protein_id, mutations in scores_per_protein.items()
for sav, score in mutations.items()
],
leave=False,
)
]
)
| 6,250 | Python | .py | 159 | 28.383648 | 88 | 0.575613 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,962 | proteingym.py | JSchlensok_VespaG/vespag/utils/proteingym.py | INFO_COLUMNS = ["DMS_id", "UniProt_ID", "taxon", "coarse_selection_type"]
PROTEINGYM_CHANGED_FILENAMES = {
"A0A140D2T1_ZIKV_Sourisseau_growth_2019": "A0A140D2T1_ZIKV_Sourisseau_2019.csv",
"A4_HUMAN_Seuma_2021": "A4_HUMAN_Seuma_2022.csv",
"A4D664_9INFA_Soh_CCL141_2019": "A4D664_9INFA_Soh_2019.csv",
"CAPSD_AAV2S_Sinai_substitutions_2021": "CAPSD_AAV2S_Sinai_2021.csv",
"CP2C9_HUMAN_Amorosi_abundance_2021": "CP2C9_HUMAN_Amorosi_2021_abundance.csv",
"CP2C9_HUMAN_Amorosi_activity_2021": "CP2C9_HUMAN_Amorosi_2021_activity.csv",
"DYR_ECOLI_Thompson_plusLon_2019": "DYR_ECOLI_Thompson_2019.csv",
"GCN4_YEAST_Staller_induction_2018": "GCN4_YEAST_Staller_2018.csv",
"B3VI55_LIPST_Klesmith_2015": "LGK_LIPST_Klesmith_2015.csv",
"MTH3_HAEAE_Rockah-Shmuel_2015": "MTH3_HAEAE_RockahShmuel_2015.csv",
"NRAM_I33A0_Jiang_standard_2016": "NRAM_I33A0_Jiang_2016.csv",
"P53_HUMAN_Giacomelli_NULL_Etoposide_2018": "P53_HUMAN_Giacomelli_2018_Null_Etoposide.csv",
"P53_HUMAN_Giacomelli_NULL_Nutlin_2018": "P53_HUMAN_Giacomelli_2018_Null_Nutlin.csv",
"P53_HUMAN_Giacomelli_WT_Nutlin_2018": "P53_HUMAN_Giacomelli_2018_WT_Nutlin.csv",
"R1AB_SARS2_Flynn_growth_2022": "R1AB_SARS2_Flynn_2022.csv",
"RL401_YEAST_Mavor_2016": "RL40A_YEAST_Mavor_2016.csv",
"RL401_YEAST_Roscoe_2013": "RL40A_YEAST_Roscoe_2013.csv",
"RL401_YEAST_Roscoe_2014": "RL40A_YEAST_Roscoe_2014.csv",
"SPIKE_SARS2_Starr_bind_2020": "SPIKE_SARS2_Starr_2020_binding.csv",
"SPIKE_SARS2_Starr_expr_2020": "SPIKE_SARS2_Starr_2020_expression.csv",
"SRC_HUMAN_Ahler_CD_2019": "SRC_HUMAN_Ahler_2019.csv",
"TPOR_HUMAN_Bridgford_S505N_2020": "TPOR_HUMAN_Bridgford_2020.csv",
"VKOR1_HUMAN_Chiasson_abundance_2020": "VKOR1_HUMAN_Chiasson_2020_abundance.csv",
"VKOR1_HUMAN_Chiasson_activity_2020": "VKOR1_HUMAN_Chiasson_2020_activity.csv",
}
| 1,872 | Python | .py | 27 | 64.740741 | 95 | 0.730477 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,963 | eval.py | JSchlensok_VespaG/vespag/utils/eval.py | from typing import Sequence
import pingouin as pg
def bootstrap_mean(data: Sequence[float]) -> dict[str, float]:
ci, dist = pg.compute_bootci(
data,
func="mean",
method="norm",
n_boot=1000,
decimals=3,
seed=42,
return_dist=True,
)
mean = data.mean()
stderr = (ci[1] - ci[0]) / 2 # force symmetrization to get rid of tiny differences
return {"mean": mean, "stderr": stderr}
| 454 | Python | .py | 15 | 23.933333 | 87 | 0.604598 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,964 | type_hinting.py | JSchlensok_VespaG/vespag/utils/type_hinting.py | from enum import Enum
class PrecisionType(str, Enum):
half = "half"
float = "float"
class Architecture(str, Enum):
fnn = "fnn"
cnn = "cnn"
combined = "combined"
mean = "mean"
class EmbeddingType(str, Enum):
esm2 = "esm2"
prott5 = "prott5"
| 277 | Python | .py | 12 | 18.916667 | 31 | 0.640927 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,965 | utils.py | JSchlensok_VespaG/vespag/utils/utils.py | from __future__ import annotations
import logging
import math
import zipfile
from pathlib import Path
from typing import Literal
import numpy as np
import pandas as pd
import requests
import rich.progress as progress
import torch
import torch.multiprocessing as mp
from rich.logging import RichHandler
from vespag.models import FNN, MinimalCNN
from .type_hinting import Architecture, EmbeddingType
GEMME_ALPHABET = "ACDEFGHIKLMNPQRSTVWY"
VESPA_ALPHABET = "ALGVSREDTIPKFQNYMHWC"
AMINO_ACIDS = sorted(list(set(GEMME_ALPHABET)))
DEFAULT_MODEL_PARAMETERS = {
"architecture": "fnn",
"model_parameters": {"hidden_dims": [256], "dropout_rate": 0.2},
"embedding_type": "esm2",
}
MODEL_VERSION = "v2"
def save_async(obj, pool: mp.Pool, path: Path, mkdir: bool = True):
if mkdir:
path.parent.mkdir(parents=True, exist_ok=True)
pool.apply_async(torch.save, (obj, path))
def load_model_from_config(
architecture: str, model_parameters: dict, embedding_type: str
):
if architecture == "fnn":
model = FNN(
hidden_layer_sizes=model_parameters["hidden_dims"],
input_dim=get_embedding_dim(embedding_type),
dropout_rate=model_parameters["dropout_rate"],
)
elif architecture == "cnn":
model = MinimalCNN(
input_dim=get_embedding_dim(embedding_type),
n_channels=model_parameters["n_channels"],
kernel_size=model_parameters["kernel_size"],
padding=model_parameters["padding"],
fnn_hidden_layers=model_parameters["fully_connected_layers"],
cnn_dropout_rate=model_parameters["dropout"]["cnn"],
fnn_dropout_rate=model_parameters["dropout"]["fnn"],
)
    else:
        raise ValueError(f"Unknown architecture: {architecture}")
    return model
def load_model(
architecture: Architecture,
model_parameters: dict,
embedding_type: EmbeddingType,
checkpoint_file: Path = None,
) -> torch.nn.Module:
checkpoint_file = (
checkpoint_file
or Path.cwd() / f"model_weights/{MODEL_VERSION}/{embedding_type}.pt"
)
model = load_model_from_config(architecture, model_parameters, embedding_type)
model.load_state_dict(torch.load(checkpoint_file))
return model
def setup_logger() -> logging.Logger:
logging.basicConfig(
level="NOTSET", format="%(message)s", datefmt="[%X]", handlers=[RichHandler()]
)
logger = logging.getLogger("rich")
logger.setLevel(logging.INFO)
return logger
def get_embedding_dim(embedding_type: EmbeddingType) -> int:
if embedding_type == "prott5":
return 1024
elif embedding_type == "esm2":
return 2560
def get_device() -> torch.device:
if torch.cuda.is_available():
return torch.device("cuda:0")
elif torch.backends.mps.is_available():
return torch.device("mps")
else:
return torch.device("cpu")
def get_precision() -> Literal["half", "float"]:
if "cuda" in str(get_device()):
return "half"
else:
return "float"
def download(
url: str, path: Path, progress_description: str, remove_bar: bool = False
) -> None:
path.parent.mkdir(exist_ok=True, parents=True)
with progress.Progress(
progress.TextColumn("[progress.description]{task.description}"),
progress.BarColumn(),
progress.TaskProgressColumn(),
progress.DownloadColumn(),
progress.TransferSpeedColumn(),
) as pbar, open(path, "wb") as f:
response = requests.get(url, stream=True)
total_size = int(response.headers.get("content-length", 0))
download_progress = pbar.add_task(progress_description, total=total_size)
for data in response.iter_content(256):
f.write(data)
pbar.update(download_progress, advance=len(data))
if remove_bar:
pbar.remove_task(download_progress)
def unzip(
zip_path: Path, out_path: Path, progress_description: str, remove_bar: bool = False
) -> None:
out_path.mkdir(exist_ok=True, parents=True)
with progress.Progress(
*progress.Progress.get_default_columns()
) as pbar, zipfile.ZipFile(zip_path, "r") as zip:
extraction_progress = pbar.add_task(
progress_description, total=len(zip.infolist())
)
for member in zip.infolist():
zip.extract(member, out_path)
pbar.advance(extraction_progress)
if remove_bar:
pbar.remove_task(extraction_progress)
def read_gemme_table(txt_file: Path) -> np.ndarray:
df = pd.read_csv(txt_file, sep=" ").fillna(0)
return df.to_numpy()
# Loaded once at import time; assumes the score-transformation CSVs are
# present relative to the current working directory
raw_score_cdf = np.loadtxt("data/score_transformation/vespag_scores.csv", delimiter=",")
sorted_gemme_scores = np.loadtxt(
"data/score_transformation/sorted_gemme_scores.csv", delimiter=","
)
def transform_score(score: float) -> float:
"""Transform VespaG score distribution by mapping it to a known distribution of GEMME scores through its quantile"""
quantile = (raw_score_cdf <= score).mean()
score = np.interp(
quantile, np.linspace(0, 1, len(sorted_gemme_scores)), sorted_gemme_scores
)
return score
def normalize_score(score: float) -> float:
"""Normalize VespaG score to [0, 1] range."""
return 1 / (1 + math.exp(-score))
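# Worked sketch of the two steps (values are illustrative, not taken from the
# real score files): a raw score sitting at the 10th percentile of
# raw_score_cdf is mapped by transform_score to the 10th-percentile GEMME
# score, and normalize_score then squashes any score s to 1 / (1 + e^-s),
# e.g. normalize_score(0.0) == 0.5.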
| 5,254 | Python | .py | 136 | 32.639706 | 120 | 0.677553 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,966 | __init__.py | JSchlensok_VespaG/vespag/utils/__init__.py | from .mutations import *
from .utils import *
__all__ = [
"AMINO_ACIDS",
"compute_mutation_score",
"DEFAULT_MODEL_PARAMETERS",
"download",
"GEMME_ALPHABET",
"get_device" "get_embedding_dim",
"get_precision",
"load_model",
"mask_non_mutations",
"Mutation",
"read_gemme_table",
"read_mutation_file",
"SAV",
"save_async",
"setup_logger",
"unzip",
"VESPA_ALPHABET",
]
| 433 | Python | .py | 21 | 16.333333 | 37 | 0.608273 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,967 | mutations.py | JSchlensok_VespaG/vespag/utils/mutations.py | from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import polars as pl
import rich
import torch
from jaxtyping import Float
from .utils import GEMME_ALPHABET, normalize_score, transform_score
@dataclass
class SAV:
position: int
from_aa: str
to_aa: str
one_indexed: bool = False
@classmethod
def from_sav_string(
cls, sav_string: str, one_indexed: bool = False, offset: int = 0
) -> SAV:
from_aa, to_aa = sav_string[0], sav_string[-1]
position = int(sav_string[1:-1]) - offset
if one_indexed:
position -= 1
return SAV(position, from_aa, to_aa, one_indexed=one_indexed)
def __str__(self) -> str:
pos = self.position
if self.one_indexed:
pos += 1
return f"{self.from_aa}{pos}{self.to_aa}"
def __hash__(self):
return hash(str(self))
@dataclass
class Mutation:
savs: list[SAV]
@classmethod
def from_mutation_string(
cls, mutation_string: str, one_indexed: bool = False, offset: int = 0
) -> Mutation:
return Mutation(
[
SAV.from_sav_string(sav_string, one_indexed=one_indexed, offset=offset)
for sav_string in mutation_string.split(":")
]
)
def __str__(self) -> str:
return ":".join([str(sav) for sav in self.savs])
def __hash__(self):
return hash(str(self))
def __iter__(self):
yield from self.savs
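# Parsing sketch (hypothetical mutation strings): single SAVs and
# colon-separated multiple mutants round-trip through their string forms:
#
#     sav = SAV.from_sav_string("A25G", one_indexed=True)  # stored 0-based as 24
#     mut = Mutation.from_mutation_string("A25G:C99Y", one_indexed=True)
#     assert str(mut) == "A25G:C99Y"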
def mask_non_mutations(
gemme_prediction: Float[torch.Tensor, "length 20"], wildtype_sequence
) -> Float[torch.Tensor, "length 20"]:
"""
Simply set the predicted effect of the wildtype amino acid at each position (i.e. all non-mutations) to 0
"""
gemme_prediction[
torch.arange(len(wildtype_sequence)),
torch.tensor([GEMME_ALPHABET.index(aa) for aa in wildtype_sequence]),
] = 0.0
return gemme_prediction
def read_mutation_file(
mutation_file: Path, one_indexed: bool = False
) -> dict[str, list[Mutation]]:
mutations_per_protein = defaultdict(list)
for row in pl.read_csv(mutation_file).iter_rows():
mutations_per_protein[row[0]].append(
Mutation.from_mutation_string(row[1], one_indexed)
)
return mutations_per_protein
def compute_mutation_score(
substitution_score_matrix: Float[torch.Tensor, "length 20"],
mutation: Union[Mutation, SAV],
alphabet: str = GEMME_ALPHABET,
transform: bool = True,
normalize: bool = True,
pbar: rich.progress.Progress = None,
progress_id: int = None,
) -> float:
if pbar:
pbar.advance(progress_id)
if isinstance(mutation, Mutation):
raw_scores = [
substitution_score_matrix[sav.position][alphabet.index(sav.to_aa)].item()
for sav in mutation
]
else:
raw_scores = [
substitution_score_matrix[mutation.position][
alphabet.index(mutation.to_aa)
].item()
]
if transform:
raw_scores = [transform_score(score) for score in raw_scores]
score = sum(raw_scores)
if normalize:
score = normalize_score(score)
return score
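# Usage sketch (hypothetical tensor): scoring a single SAV against a random
# length-by-20 prediction matrix, with transformation and normalization
# disabled so the raw matrix entry is returned:
#
#     y = torch.rand(100, 20)
#     sav = SAV(position=4, from_aa="A", to_aa="W")
#     score = compute_mutation_score(y, sav, transform=False, normalize=False)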
| 3,293 | Python | .py | 99 | 26.656566 | 109 | 0.637828 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,968 | style.py | JSchlensok_VespaG/vespag/utils/plotting/style.py | # Main colors
PINK = "#DC267F"
BLUE = "#785EF0"
YELLOW = "#FFB000"
# Grey shades
CHARCOAL = "#232023"
IRON = "#322D31"
GRAPHITE = "#594D5B"
GRAY = "#808080"
COIN = "#9897A9"
# Auxiliary colors
MALIBU = "#648FFF"
ORANGE = "#FE6100"
METHOD_COLORS = {
"VespaG": PINK,
"GEMME": BLUE,
"VESPA": YELLOW,
"TranceptEVE-L": GRAPHITE,
"ESM-2 (3B)": GRAY,
"SaProt (650M)": COIN,
"AlphaMissense": CHARCOAL,
"PoET": IRON,
}
MULTILINE_LABELS = {
"VespaG": "VespaG",
"GEMME": "GEMME",
"VESPA": "VESPA",
"PoET": "PoET",
"AlphaMissense": "Alpha\nMissense",
"TranceptEVE-L": "Trancept\nEVE-L",
"SaProt (650M)": "SaProt\n(650M)",
"ESM-2 (3B)": "ESM-2\n(3B)",
}
MILLIMETER = 1 / 2.54 / 10  # inches per millimeter (matplotlib sizes are in inches)
WIDTH = 180 * MILLIMETER
HEIGHT = 100 * MILLIMETER
BARLABEL_FONTSIZE = 8
XTICK_FONTSIZE = 7
PANEL_LABEL_FONTSIZE = 16
BARPLOT_KEYWORDS = {
"errorbar": ("se", 1.96),
"n_boot": 1000,
"err_kws": {"linewidth": 1},
"capsize": 0.2,
}
| 983 | Python | .py | 45 | 18.911111 | 39 | 0.613319 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,969 | utils.py | JSchlensok_VespaG/vespag/utils/plotting/utils.py | from typing import Union
import matplotlib as mpl
def label_bars(
ax: mpl.axes.Axes, digits: int = 3, fontsize: Union[str, int] = "small"
) -> None:
for c in ax.containers:
ax.bar_label(
c,
fmt=f"%.{digits}f",
label_type="center",
fontsize=fontsize,
color="white",
)
def change_width(ax: mpl.axes.Axes, new_value: float) -> None:
for patch in ax.patches:
current_width = patch.get_width()
diff = current_width - new_value
patch.set_width(new_value)
patch.set_x(patch.get_x() + diff * 0.5)
| 615 | Python | .py | 19 | 24.736842 | 75 | 0.581356 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,970 | seaborn_plotting.py | JSchlensok_VespaG/vespag/utils/plotting/seaborn_plotting.py | import functools as ft
from dataclasses import dataclass
from typing import Union
import polars as pl
import seaborn as sns
# Copyright (c) 2023 Christopher Prohm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
@pl.api.register_dataframe_namespace("sns")
@pl.api.register_lazyframe_namespace("sns")
@dataclass
class SeabornPlotting:
df: Union[pl.DataFrame, pl.LazyFrame]
def pipe(self, func, /, **kwargs):
def maybe_collect(df):
return df.collect() if isinstance(df, pl.LazyFrame) else df
exprs = {}
for key in "x", "y", "hue", "col", "row":
val = kwargs.get(key)
if val is None:
continue
expr = pl.col(val) if isinstance(val, str) else val
exprs[expr.meta.output_name()] = expr
kwargs[key] = expr.meta.output_name()
return (
self.df.select(list(exprs.values()))
.pipe(maybe_collect)
.to_pandas()
.pipe(func, **kwargs)
)
relplot = ft.partialmethod(pipe, sns.relplot)
scatterplot = ft.partialmethod(pipe, sns.scatterplot)
lineplot = ft.partialmethod(pipe, sns.lineplot)
displot = ft.partialmethod(pipe, sns.displot)
histplot = ft.partialmethod(pipe, sns.histplot)
kdeplot = ft.partialmethod(pipe, sns.kdeplot)
ecdfplot = ft.partialmethod(pipe, sns.ecdfplot)
rugplot = ft.partialmethod(pipe, sns.rugplot)
distplot = ft.partialmethod(pipe, sns.distplot)
catplot = ft.partialmethod(pipe, sns.catplot)
stripplot = ft.partialmethod(pipe, sns.stripplot)
swarmplot = ft.partialmethod(pipe, sns.swarmplot)
boxplot = ft.partialmethod(pipe, sns.boxplot)
violinplot = ft.partialmethod(pipe, sns.violinplot)
boxenplot = ft.partialmethod(pipe, sns.boxenplot)
pointplot = ft.partialmethod(pipe, sns.pointplot)
barplot = ft.partialmethod(pipe, sns.barplot)
countplot = ft.partialmethod(pipe, sns.countplot)
lmplot = ft.partialmethod(pipe, sns.lmplot)
regplot = ft.partialmethod(pipe, sns.regplot)
residplot = ft.partialmethod(pipe, sns.residplot)
| 3,120 | Python | .py | 68 | 40.794118 | 80 | 0.719921 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,971 | __init__.py | JSchlensok_VespaG/vespag/utils/plotting/__init__.py | from .seaborn_plotting import SeabornPlotting
from .style import (
BARLABEL_FONTSIZE,
BARPLOT_KEYWORDS,
HEIGHT,
METHOD_COLORS,
MILLIMETER,
MULTILINE_LABELS,
PANEL_LABEL_FONTSIZE,
WIDTH,
XTICK_FONTSIZE,
)
from .utils import label_bars
__all__ = [
"BARLABEL_FONTSIZE",
"BARPLOT_KEYWORDS",
"HEIGHT",
"label_bars",
"METHOD_COLORS",
"MILLIMETER",
"MULTILINE_LABELS",
"PANEL_LABEL_FONTSIZE",
"SeabornPlotting",
"WIDTH",
"XTICK_FONTSIZE",
]
| 515 | Python | .py | 26 | 15.692308 | 45 | 0.668033 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,972 | trainer.py | JSchlensok_VespaG/vespag/training/trainer.py | import logging
import shutil
from pathlib import Path
import rich.progress as progress
import torch
import torch.multiprocessing as mp
import wandb
from vespag.utils import save_async
class Trainer:
def __init__(
self,
run: str,
model: torch.nn.Module,
device: torch.device,
pool: mp.Pool,
train_dl: torch.utils.data.DataLoader,
train_eval_dls: dict[str, torch.utils.data.DataLoader],
val_dls: dict[str, torch.utils.data.DataLoader],
optimizer: torch.optim.Optimizer,
scheduler,
criterion,
progress_bar: progress.Progress,
output_dir: Path,
logger: logging.Logger = None,
use_wandb: bool = True,
):
self.run = run
self.device = device
self.pool = pool
self.model = model
self.train_dl = train_dl
self.train_eval_dls = train_eval_dls
self.val_dls = val_dls
self.optimizer = optimizer
self.scheduler = scheduler
self.criterion = criterion
self.progress_bar = progress_bar
self.output_dir = output_dir
self.epoch = 0
self.best_epoch = 0
self.logger = logger
self.use_wandb = use_wandb
if self.use_wandb:
wandb.watch(self.model, log_freq=10)
self.best_loss = torch.inf
self.total_steps = 0
self.total_batches = 0
self.best_metadata = None
def train_epoch(self):
progress_id = self.progress_bar.add_task(
f"Train epoch: {self.epoch + 1:4d}", total=len(self.train_dl)
)
self.model.train()
for embeddings, annotations in self.train_dl:
self.total_steps += embeddings.shape[0]
self.total_batches += 1
self.optimizer.zero_grad()
with torch.autocast("cuda"):
pred = self.model(embeddings)
                # Don't backpropagate any loss for NaN annotations: setting the
                # target equal to the prediction makes their contribution to the
                # MSE loss (and its gradient) exactly zero
annotation_nan_mask = torch.isnan(annotations)
annotations[annotation_nan_mask] = pred[annotation_nan_mask]
loss = self.criterion(pred, annotations)
loss.backward()
self.optimizer.step()
# Re-set annotation NaN values
annotations[annotation_nan_mask] = torch.nan
if self.use_wandb:
wandb.log(
{
"train/batch_loss": loss,
"epoch": self.epoch,
"step": self.total_steps,
"batch": self.total_batches,
}
)
self.progress_bar.advance(progress_id)
self.epoch += 1
self.progress_bar.remove_task(progress_id)
@torch.no_grad()
def _infer(
self, dl: torch.utils.data.DataLoader, progress_id: progress.TaskID
) -> tuple[torch.Tensor, torch.Tensor]:
all_annotations = []
all_preds = []
for embeddings, annotations in dl:
with torch.autocast("cuda"):
preds = self.model(embeddings)
del embeddings
all_annotations.append(annotations)
del annotations
all_preds.append(preds)
del preds
self.progress_bar.advance(progress_id)
return torch.cat(all_annotations), torch.cat(all_preds)
@torch.no_grad()
def train_eval_epoch(self, save_predictions: bool = False):
self.model.eval()
n_train_batches = int(sum(len(dl) for dl in self.train_eval_dls.values()))
progress_id = self.progress_bar.add_task(
f"Train eval epoch: {self.epoch:4d}", total=n_train_batches
)
all_annotations = []
all_preds = []
for dataset, dl in self.train_eval_dls.items():
annotations, preds = self._infer(dl, progress_id)
all_annotations.append(annotations)
all_preds.append(preds)
if save_predictions:
save_async(
[preds.cpu(), annotations.cpu()],
self.pool,
self.output_dir / f"epoch-{self.epoch}/train/{dataset}.pt",
)
nan_mask = torch.isnan(annotations)
annotations = annotations[~nan_mask]
preds = preds[~nan_mask]
loss = self.criterion(preds, annotations)
del preds
del annotations
# spearman = tm.functional.spearman_corrcoef(preds, annotations).item()
if self.use_wandb:
wandb.log(
{
"epoch": self.epoch,
f"train/{dataset}/loss": loss,
# f"train/{dataset}/spearman": spearman,
"step": self.total_steps,
}
)
all_annotations = torch.cat(all_annotations)
all_preds = torch.cat(all_preds)
nan_mask = torch.isnan(all_annotations)
all_annotations = all_annotations[~nan_mask]
all_preds = all_preds[~nan_mask]
loss = self.criterion(all_preds, all_annotations)
del all_preds
del all_annotations
if self.use_wandb:
wandb.log(
{
"epoch": self.epoch,
"train/overall/loss": loss,
# "train/overall/spearman": spearman,
"step": self.total_steps,
}
)
self.progress_bar.remove_task(progress_id)
@torch.no_grad()
def val_epoch(self, save_predictions: bool = False):
self.model.eval()
n_val_batches = int(sum(len(dl) for dl in self.val_dls.values()))
progress_id = self.progress_bar.add_task(
f"Val epoch: {self.epoch:4d}", total=n_val_batches
)
all_annotations = []
all_preds = []
for dataset, dl in self.val_dls.items():
annotations, preds = self._infer(dl, progress_id)
all_annotations.append(annotations)
all_preds.append(preds)
if save_predictions:
save_async(
[preds.cpu(), annotations.cpu()],
self.pool,
self.output_dir / f"epoch-{self.epoch}/val/{dataset}.pt",
)
nan_mask = torch.isnan(annotations)
annotations = annotations[~nan_mask]
preds = preds[~nan_mask]
loss = self.criterion(preds, annotations)
del preds
del annotations
# spearman = tm.functional.spearman_corrcoef(preds, annotations).item()
if self.use_wandb:
wandb.log(
{
"epoch": self.epoch,
f"val/{dataset}/loss": loss,
# f"val/{dataset}/spearman": spearman,
"step": self.total_steps,
}
)
all_annotations = torch.cat(all_annotations)
all_preds = torch.cat(all_preds)
nan_mask = torch.isnan(all_annotations)
all_annotations = all_annotations[~nan_mask]
all_preds = all_preds[~nan_mask]
loss = self.criterion(all_preds, all_annotations)
del all_preds
del all_annotations
if self.use_wandb:
wandb.log(
{
"epoch": self.epoch,
"train/learning_rate": self.optimizer.param_groups[0]["lr"],
}
)
self.scheduler.step(loss)
metadata = {
"val/overall/loss": loss,
"epoch": self.epoch,
"step": self.total_steps,
}
if self.use_wandb:
wandb.log(metadata)
if loss < self.best_loss:
self.save_state_dict(f"epoch-{self.epoch}")
# TODO avoid deleting checkpoints made in train.py based on checkpoint_every_epoch condition
if self.best_epoch > 0:
shutil.rmtree(f"{self.output_dir}/epoch-{self.best_epoch}")
self.best_epoch = self.epoch
self.best_loss = loss
self.best_metadata = metadata
self.progress_bar.remove_task(progress_id)
def save_state_dict(self, alias: str) -> None:
if self.logger:
self.logger.info(
f"Saving checkpoint to {self.output_dir}/{alias}/state_dict.pt"
)
checkpoint_path = self.output_dir / f"{alias}/state_dict.pt"
save_async(
{key: value.cpu() for key, value in self.model.state_dict().items()},
self.pool,
checkpoint_path,
)
def on_train_start(self):
pass
def on_train_end(self):
self.save_state_dict(f"epoch-{self.epoch}")
if self.use_wandb:
latest_artifact = wandb.Artifact(name=f"model-{self.run}", type="model")
latest_artifact.add_dir(self.output_dir / f"epoch-{self.epoch}")
if self.best_epoch == self.epoch:
wandb.log_artifact(latest_artifact, aliases=["latest", "best"])
else:
wandb.log_artifact(latest_artifact, aliases=["latest"])
best_artifact = wandb.Artifact(
name=f"model-{self.run}", type="model", metadata=self.best_metadata
)
best_artifact.add_dir(self.output_dir / f"epoch-{self.best_epoch}")
wandb.log_artifact(best_artifact, aliases=["best"])
| 9,584 | Python | .py | 242 | 27.136364 | 104 | 0.544145 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,973 | train.py | JSchlensok_VespaG/vespag/training/train.py | import gc
import logging
import os
from pathlib import Path
import rich.progress as progress
import torch
import torch.multiprocessing as mp
import torch.optim.lr_scheduler
import wandb
from dvc.api import params_show
from vespag.utils import get_device, get_precision, load_model_from_config, setup_logger
from .dataset import PerResidueDataset
from .trainer import Trainer
def capitalize_embedding_type(embedding_type: str) -> str:
return {"prott5": "ProtT5", "esm2": "ESM2"}[embedding_type]
def train(
model_config_key: str,
datasets: list[str],
output_dir: Path,
embedding_type: str,
compute_full_train_loss: bool = False,
sampling_strategy: str = "basic",
wandb_config: tuple[str, str] = None,
limit_cache: bool = False,
use_full_dataset: bool = False,
):
logger = setup_logger()
wandb_logger = logging.getLogger("wandb")
wandb_logger.setLevel(logging.INFO)
device = get_device()
precision = get_precision()
logger.info(f"Using device {str(device)} with precision {precision}")
params = params_show()
torch.manual_seed(params["random"]["seed"])
training_parameters = params["models"][model_config_key]["training_parameters"]
training_batch_size = training_parameters["batch_size"]["training"]
validation_batch_size = training_parameters["batch_size"]["validation"]
learning_rate = training_parameters["learning_rate"]
epochs = training_parameters["epochs"]
val_every_epoch = training_parameters["val_every_epoch"]
checkpoint_every_epoch = training_parameters["checkpoint_every_epoch"] or 999999
dataset_parameters = params["datasets"]
logger.info("Loading training data")
max_len = 4096 if embedding_type == "esm2" else 99999
train_datasets = {
dataset: PerResidueDataset(
dataset_parameters["train"][dataset]["embeddings"][embedding_type],
dataset_parameters["train"][dataset]["gemme"],
(
dataset_parameters["train"][dataset]["splits"]["train"]
if not use_full_dataset
else dataset_parameters["train"][dataset]["splits"]["full"]
),
precision,
device,
max_len,
limit_cache,
)
for dataset in datasets
}
big_train_dataset = torch.utils.data.ConcatDataset(list(train_datasets.values()))
if sampling_strategy == "basic":
train_dl = torch.utils.data.DataLoader(
big_train_dataset, batch_size=training_batch_size, shuffle=True
)
else:
# TODO implement properly
# TODO factor out
epoch_size = len(big_train_dataset) # TODO read from config if provided
train_weights = [
1 / (len(dataset) / len(big_train_dataset)) * (1 / row["cluster_size"])
for dataset in big_train_dataset.datasets
for row in dataset.cluster_df.rows(named=True)
for aa in row["seq"][:max_len]
]
train_dl = torch.utils.data.DataLoader(
big_train_dataset,
batch_size=training_batch_size,
sampler=torch.utils.data.WeightedRandomSampler(
train_weights, epoch_size, replacement=True
),
shuffle=True,
)
train_eval_dls = (
{
name: torch.utils.data.DataLoader(
dataset, batch_size=validation_batch_size, shuffle=False
)
for name, dataset in train_datasets.items()
}
if not use_full_dataset
else None
)
logger.info("Loading validation data")
if not use_full_dataset:
val_datasets = {
dataset: PerResidueDataset(
dataset_parameters["train"][dataset]["embeddings"][embedding_type],
dataset_parameters["train"][dataset]["gemme"],
dataset_parameters["train"][dataset]["splits"]["val"],
precision,
device,
max_len,
limit_cache,
)
for dataset in datasets
}
val_dls = {
name: torch.utils.data.DataLoader(
dataset, batch_size=validation_batch_size, shuffle=False
)
for name, dataset in val_datasets.items()
}
else:
val_dls = None
architecture = params["models"][model_config_key]["architecture"]
model_parameters = params["models"][model_config_key]["model_parameters"]
model = load_model_from_config(architecture, model_parameters, embedding_type).to(
device
)
# TODO parametrize
criterion = torch.nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
# TODO pull out to param file
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, patience=epochs / 25, factor=0.33
)
if wandb_config:
logger.info("Setting up WandB")
config = {"datasets": datasets, **params["models"][model_config_key]}
run_name = f"{'+'.join([dataset_name.capitalize() for dataset_name in datasets])} {model_config_key.upper()} {capitalize_embedding_type(embedding_type)}"
run = wandb.init(
entity=wandb_config[0],
project=wandb_config[1],
config=config,
name=run_name,
)
logger.info(f"Saving WandB run ID to {output_dir}/wandb_run_id.txt")
output_dir.mkdir(parents=True, exist_ok=True)
with open(output_dir / "wandb_run_id.txt", "w+") as f:
f.write(run.id)
wandb.define_metric("step")
wandb.define_metric("train/batch_loss", step_metric="step")
wandb.define_metric("batch")
wandb.define_metric("epoch")
wandb.define_metric("learning_rate", step_metric="epoch")
for dataset in datasets + ["overall"]:
wandb.define_metric(
f"train/{dataset}/loss", step_metric="epoch", summary="min"
)
wandb.define_metric(
f"train/{dataset}/spearman", step_metric="epoch", summary="max"
)
wandb.define_metric(
f"val/{dataset}/loss", step_metric="epoch", summary="min"
)
wandb.define_metric(
f"val/{dataset}/spearman", step_metric="epoch", summary="max"
)
wandb_cache_dir = Path.cwd() / ".wandb/cache"
wandb_cache_dir.mkdir(exist_ok=True, parents=True)
os.environ["WANDB_CACHE_DIR"] = str(wandb_cache_dir)
threads = mp.cpu_count()
mp.set_start_method("spawn", force=True)
with progress.Progress(
*progress.Progress.get_default_columns(), progress.TimeElapsedColumn()
) as pbar, mp.Pool(threads) as pool:
print()
progress_task_id = pbar.add_task("Training", total=epochs)
trainer = Trainer(
            run.id if wandb_config else None,
model,
device,
pool,
train_dl,
train_eval_dls,
val_dls,
optimizer,
lr_scheduler,
criterion,
pbar,
output_dir,
logger,
            use_wandb=bool(wandb_config),
)
trainer.on_train_start()
for epoch in range(epochs):
trainer.train_epoch()
if (epoch + 1) % val_every_epoch == 0 and not use_full_dataset:
trainer.val_epoch()
if compute_full_train_loss:
trainer.train_eval_epoch()
if (epoch + 1) % checkpoint_every_epoch == 0:
trainer.save_state_dict(f"epoch-{epoch}")
pbar.advance(progress_task_id)
trainer.on_train_end()
gc.collect()
torch.cuda.empty_cache()
wandb.finish()
| 7,784 | Python | .py | 199 | 29.743719 | 161 | 0.605183 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,974 | dataset.py | JSchlensok_VespaG/vespag/training/dataset.py | from pathlib import Path
import h5py
import numpy as np
import polars as pl
import rich.progress as progress
import torch
from jaxtyping import Float
from vespag.utils.type_hinting import PrecisionType
class PerResidueDataset(torch.utils.data.Dataset):
def __init__(
self,
embedding_file: Path,
annotation_file: Path,
cluster_file: Path,
precision: PrecisionType,
device: torch.device,
max_len: int,
limit_cache: bool = False,
):
self.precision = precision
self.device = device
self.dtype = torch.float if precision == "float" else torch.half
self.limit_cache = limit_cache
self.cluster_df = pl.read_csv(cluster_file)
self.protein_embeddings = {
key: torch.tensor(np.array(data[()]), device=self.device, dtype=self.dtype)
for key, data in progress.track(
h5py.File(embedding_file, "r").items(),
description=f"Loading embeddings from {embedding_file}",
transient=True,
)
if key in self.cluster_df["protein_id"]
}
self.protein_annotations = {
key: torch.tensor(
np.array(data[()][:max_len]), device=self.device, dtype=self.dtype
)
for key, data in progress.track(
h5py.File(annotation_file, "r").items(),
description=f"Loading annotations from {annotation_file}",
transient=True,
)
if key in self.cluster_df["protein_id"]
}
self.residue_embeddings = torch.cat(
[
self.protein_embeddings[protein_id]
for protein_id in progress.track(
self.cluster_df["protein_id"],
description="Pre-loading embeddings",
transient=True,
)
]
)
self.residue_annotations = torch.cat(
[
self.protein_annotations[protein_id]
for protein_id in progress.track(
self.cluster_df["protein_id"],
description="Pre-loading annotations",
transient=True,
)
]
)
def __getitem__(
self, idx
) -> tuple[
Float[torch.Tensor, "length embedding_dim"], Float[torch.Tensor, "length 20"]
]:
embedding = self.residue_embeddings[idx]
annotation = self.residue_annotations[idx]
if self.precision == "half":
embedding = embedding.half()
annotation = annotation.half()
else:
embedding = embedding.float()
annotation = annotation.float()
embedding = embedding.clone()
annotation = annotation.clone()
return embedding, annotation
def __len__(self):
return self.residue_embeddings.shape[0]
| 2,949 | Python | .py | 82 | 25.04878 | 87 | 0.564731 | JSchlensok/VespaG | 8 | 3 | 5 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,975 | build-macos.py | cwhelchel_hunterlog/build-macos.py | import os
import py2app
import shutil
from distutils.core import setup
def tree(src):
    # materialize the file lists: py2app expects concrete sequences, and map
    # iterators would be exhausted after a single pass
    return [(root, [os.path.join(root, f) for f in files])
            for (root, dirs, files) in os.walk(os.path.normpath(src))]
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist/index.app'):
shutil.rmtree('dist/index.app')
ENTRY_POINT = ['src/index.py']
DATA_FILES = tree('gui')
OPTIONS = {
'argv_emulation': False,
'strip': False,
'iconfile': 'src/assets/logo.icns',
'includes': ['charset_normalizer.md__mypyc'],
'packages': ['WebKit', 'Foundation', 'webview'],
'plist': {
'NSRequiresAquaSystemAppearance': False
},
'resources': DATA_FILES
}
setup(
app=ENTRY_POINT,
name='Hunterlog',
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
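
# Hedged usage note: with py2app installed, the bundle is typically built by
# invoking the py2app command (standard py2app convention, not verified
# against this repo's release process):
#
#     python build-macos.py py2app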
| 829 | Python | .py | 30 | 23.833333 | 66 | 0.662453 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,976 | bands.py | cwhelchel_hunterlog/src/bands.py | '''
This file contains data about the Ham radio bands.
Import Bands for the main enum values, bandNames for the display names, and
bandLimits for the band edges (not currently configurable). Import
get_band(freq) to map a frequency in kHz to a Bands enum value.
'''
from enum import Enum
import logging as L
logging = L.getLogger("bands")
class Bands(Enum):
NOBAND = 0
ONESIXTY = 1
EIGHTY = 2
SIXTY = 3
FOURTY = 4
THIRTY = 5
TWENTY = 6
SEVENTEEN = 7
FIFTEEN = 8
TWELVE = 9
TEN = 10
SIX = 11
TWO = 12
ONEPTWOFIVE = 13
SEVENTYCM = 14
THIRTYTHREECM = 15
TWENTYTHREECM = 16
bandNames = [
'NA', '160m', '80m', '60m', '40m', '30m', '20m', '17m', '15m', '12m',
'10m', '6m', '1.25m', '70cm', '33cm', '23cm'
]
bandLimits = {
Bands.ONESIXTY: (1800.0, 2000.0),
Bands.EIGHTY: (3500.0, 4000.0),
Bands.SIXTY: (5330.0, 5410.0),
Bands.FOURTY: (7000.0, 7300.0),
Bands.THIRTY: (10100.0, 10150.0),
Bands.TWENTY: (14000.0, 14350.0),
Bands.SEVENTEEN: (18068.0, 18168.0),
Bands.FIFTEEN: (21000.0, 21450.0),
Bands.TWELVE: (24890.0, 24990.0),
Bands.TEN: (28000.0, 29700.0),
Bands.SIX: (50000.0, 54000.0),
Bands.TWO: (144_000.0, 148_000.0),
Bands.ONEPTWOFIVE: (219_000.0, 225_000.0),
Bands.SEVENTYCM: (420_000.0, 450_000.0),
Bands.THIRTYTHREECM: (902_000.0, 928_000.0),
Bands.TWENTYTHREECM: (1_270_000.0, 1_300_000.0)
}
def get_band(freq: str) -> Bands:
'''
Get the enumerated Bands value for the given frequency
    :param str freq: string of the frequency in kHz (e.g. "14074")
'''
try:
f = float(freq)
for band, lmt in bandLimits.items():
            if (f >= lmt[0] and f <= lmt[1]):
return band
except ValueError:
logging.error("invalid str to float conv in get_band(freq)")
return Bands.NOBAND
def get_band_name(freq: str) -> str:
'''
Get band name for the given frequency.
    :param str freq: string of the frequency in kHz (e.g. "7200")
'''
try:
f = float(freq)
for band, lmt in bandLimits.items():
if (f >= lmt[0] and f <= lmt[1]):
return bandNames[band.value]
except ValueError:
logging.error("invalid str to float conv in get_band_name(freq)")
    return bandNames[Bands.NOBAND.value]
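
if __name__ == '__main__':
    # Small self-check sketch with assumed example frequencies (kHz, as the
    # bandLimits table above implies): 14074 kHz is 20m, 7200 kHz is 40m.
    assert get_band('14074') == Bands.TWENTY
    assert get_band_name('7200') == '40m'
    assert get_band('999999999') == Bands.NOBAND
    print('bands.py self-check passed')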
| 2,346 | Python | .py | 75 | 25.986667 | 77 | 0.620735 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,977 | api.py | cwhelchel_hunterlog/src/api.py | import json
import time
import webview
import logging as L
import datetime
import threading
from datetime import timedelta
from db.db import DataBase
from db.models.activators import Activator, ActivatorSchema
from db.models.parks import ParkSchema
from db.models.qsos import QsoSchema
from db.models.spot_comments import SpotCommentSchema
from db.models.spots import SpotSchema
from db.models.user_config import UserConfigSchema
from pota import PotaApi, PotaStats
from utils.adif import AdifLog
from version import __version__
from cat import CAT
from utils.distance import Distance
logging = L.getLogger("api")
# IDTOKENPAT = r"^.*CognitoIdentityServiceProvider\..+\.idToken=([\w\.-]*\;)"
class JsApi:
def __init__(self):
self.lock = threading.Lock()
self.db = DataBase()
self.pota = PotaApi()
self.adif_log = AdifLog()
logging.debug("init CAT...")
cfg = self.db.get_user_config()
try:
self.cat = CAT(cfg.rig_if_type, cfg.flr_host, cfg.flr_port)
except Exception:
logging.error("Error creating CAT object: ", exc_info=True)
self.cat = None
self.pw = None
def get_spot(self, spot_id: int):
logging.debug('py get_spot')
spot = self.db.spots.get_spot(spot_id)
ss = SpotSchema()
return ss.dumps(spot)
def get_spots(self):
logging.debug('py get_spots')
spots = self.db.spots.get_spots()
ss = SpotSchema(many=True)
return ss.dumps(spots)
def get_spot_comments(self, spot_id: int):
spot = self.db.spots.get_spot(spot_id)
x = self.db.get_spot_comments(spot.activator, spot.reference)
ss = SpotCommentSchema(many=True)
return ss.dumps(x)
def insert_spot_comments(self, spot_id: int):
'''
Pulls the spot comments from the POTA api and inserts them into our
database.
:param int spot_id: spot id. pk in db
'''
spot = self.db.spots.get_spot(spot_id)
comms = self.pota.get_spot_comments(spot.activator, spot.reference)
try:
self.lock.acquire()
self.db.insert_spot_comments(spot.activator, spot.reference, comms)
finally:
self.lock.release()
def get_qso_from_spot(self, id: int):
logging.debug(f'py getting qso data from {id}')
q = self.db.build_qso_from_spot(id)
if q is None:
return {"success": False}
cfg = self.db.get_user_config()
dist = Distance.distance(cfg.my_grid6, q.gridsquare)
bearing = Distance.bearing(cfg.my_grid6, q.gridsquare)
q.distance = dist
q.bearing = bearing
qs = QsoSchema()
return qs.dumps(q)
def get_activator_stats(self, callsign):
logging.debug("getting activator stats...")
ac = self._get_activator(callsign)
if ac is None:
return self._response(False, f"Activator {callsign} not found")
return ActivatorSchema().dumps(ac)
def get_activator_hunts(self, callsign):
logging.debug("getting hunt count stats...")
return self.db.qsos.get_activator_hunts(callsign)
def get_park(self, ref: str, pull_from_pota: bool = True) -> str:
'''
Returns the JSON for the park if found in the db
:param str ref: the POTA park reference designator string
:param bool pull_from_pota: True (default) to try to update when a park
is not in the db.
:returns JSON of park object in db or None if not found
'''
if ref is None:
logging.error("get_park: ref param was None")
return
logging.debug(f"get_park: getting park {ref}")
park = self.db.parks.get_park(ref)
if park is None and pull_from_pota:
logging.debug(f"get_park: park was None {ref}")
api_res = self.pota.get_park(ref)
logging.debug(f"get_park: park from api {api_res}")
self.db.parks.update_park_data(api_res)
park = self.db.parks.get_park(ref)
elif park.name is None:
logging.debug(f"get_park: park Name was None {ref}")
api_res = self.pota.get_park(ref)
logging.debug(f"get_park: park from api {api_res}")
self.db.parks.update_park_data(api_res)
park = self.db.parks.get_park(ref)
ps = ParkSchema()
return ps.dumps(park)
def get_park_hunts(self, ref: str) -> str:
'''
Returns a JSON object containing the number of QSOs with activators at
the given park reference.
:param str ref: the POTA park reference designator string
        :returns JSON response with the park's hunt count.
'''
if ref is None:
logging.error("get_park: ref param was None")
return self._response(False, "park references invalid")
park = self.db.parks.get_park(ref)
if park is None:
return self._response(True, "", count=0)
else:
return self._response(True, "", count=park.hunts)
def get_user_config(self):
'''
Returns the JSON for the user configuration record in the db
'''
cfg = self.db.get_user_config()
return UserConfigSchema().dumps(cfg)
def get_version_num(self):
return self._response(
True,
"",
app_ver=__version__,
db_ver=self.db.get_version())
def spot_activator(self, qso_data, park: str) -> str:
'''
Spots the activator at the given park. The QSO data needs to be filled
out for this to work properly. Needs freq, call, and mode
:param any qso_data: dict of qso data from the UI
        :param str park: the park reference to spot the activator at.
'''
f = qso_data['freq']
a = qso_data['call']
m = qso_data['mode']
r = qso_data['rst_sent']
c = str(qso_data['comment'])
logging.debug(f"sending spot for {a} on {f}")
cfg = self.db.get_user_config()
# if spot+log is used the comment is modified before coming here.
        # remove boilerplate fluff and get the user's comments for the spot
if c.startswith("["):
x = c.index("]") + 1
c = c[x:]
qth = cfg.qth_string
if qth is not None:
spot_comment = f"[{r} {qth}] {c}"
else:
spot_comment = f"[{r}] {c}"
try:
PotaApi.post_spot(activator_call=a,
park_ref=park,
freq=f,
mode=m,
spotter_call=cfg.my_call,
spotter_comments=spot_comment)
except Exception as ex:
msg = "Error posting spot to pota api!"
logging.error(msg)
logging.exception(ex)
return self._response(False, msg)
return self._response(True, "spot posted")
def import_adif(self) -> str:
'''
        Opens an Open File dialog to allow the user to select an ADIF file
containing POTA QSOs to be imported into the app's database.
'''
ft = ('ADIF files (*.adi;*.adif)', 'All files (*.*)')
filename = webview.windows[0] \
.create_file_dialog(
webview.OPEN_DIALOG,
file_types=ft)
if not filename:
return self._response(True, "")
logging.info("starting import of ADIF file...")
AdifLog.import_from_log(filename[0], self.db)
return self._response(True, "Completed ADIF import")
def log_qso(self, qso_data):
'''
Logs the QSO to the database, adif file, and updates stats. Will force
a reload of the currently displayed spots.
:param any qso_data: dict of qso data from the UI
'''
        logging.debug('acquiring lock to log qso')
        self.lock.acquire()
        cfg = self.db.get_user_config()

        try:
            park_json = self.pota.get_park(qso_data['sig_info'])
            logging.debug(f"updating park stat for: {park_json}")
            self.db.parks.inc_park_hunt(park_json)

            qso_data['tx_pwr'] = cfg.default_pwr
            logging.debug(f"logging qso: {qso_data}")
            id = self.db.qsos.insert_new_qso(qso_data)

            # get the data to log to the adif file and remote adif host
            qso = self.db.qsos.get_qso(id)
            act = self.db.get_activator_name(qso_data['call'])
            qso.name = act if act is not None else 'ERROR NO NAME'
            self.adif_log.log_qso_and_send(qso, cfg)

            j = self.pota.get_spots()
            self.db.update_all_spots(j)
        except Exception as ex:
            logging.error("Error logging QSO to db:")
            logging.exception(ex)
            return self._response(False, "Error logging QSO.", ext=str(ex))
        finally:
            # always release, even if the ADIF export or spot refresh throws,
            # so the background update thread cannot deadlock on a held lock
            self.lock.release()
webview.windows[0].evaluate_js(
'window.pywebview.state.getSpots()')
return self._response(True, "QSO logged successfully")
def export_qsos(self):
'''
Exports the QSOs logged with this logger app into a file.
'''
try:
qs = self.db.qsos.get_qsos_from_app()
cfg = self.db.get_user_config()
dt = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log = AdifLog(filename=f"{dt}_export.adi")
for q in qs:
log.log_qso(q, cfg)
except Exception:
logging.exception("Error exporting the DB")
def set_user_config(self, config_json: any):
logging.debug(f"setting config {config_json}")
self.db.update_user_config(config_json)
def set_band_filter(self, band: int):
logging.debug(f"api setting band filter to: {band}")
self.db.set_band_filter(band)
def set_region_filter(self, region: str):
logging.debug(f"api setting region filter to: {region}")
self.db.set_region_filter(region)
def set_location_filter(self, location: str):
logging.debug(f"setting region filter to {location}")
self.db.set_location_filter(location)
def set_qrt_filter(self, is_qrt: bool):
logging.debug(f"api setting qrt filter to: {is_qrt}")
self.db.set_qrt_filter(is_qrt)
def set_hunted_filter(self, filter_hunted: bool):
logging.debug(f"api setting qrt filter to: {filter_hunted}")
self.db.set_hunted_filter(filter_hunted)
def set_only_new_filter(self, filter_only_new: bool):
logging.debug(f"api setting ATNO filter to: {filter_only_new}")
self.db.set_only_new_filter(filter_only_new)
def update_activator_stats(self, callsign: str) -> int:
j = self.pota.get_activator_stats(callsign)
if j is not None:
# the json will be none if say the call doesn't return success
# from api. probably they dont have an account
return self.db.update_activator_stat(j)
else:
logging.warn(f"activator callsign {callsign} not found")
return -1
def launch_pota_window(self):
self.pw = webview.create_window(
title='POTA APP', url='https://pota.app/#/user/stats')
def load_location_data(self):
logging.debug("downloading location data...")
locations = PotaApi.get_locations()
self.db.locations.load_location_data(locations)
return self._response(True, "Downloaded location data successfully")
def qsy_to(self, freq, mode: str):
'''Use CAT control to QSY'''
logging.debug(f"qsy_to {freq} {mode}")
if self.cat is None:
logging.warn("CAT is None. not qsy-ing")
return self._response(False, "CAT control failure.")
cfg = self.db.get_user_config()
x = float(freq) * 1000.0
logging.debug(f"adjusted freq {x}")
if mode == "SSB" and x >= 10000000:
mode = "USB"
elif mode == "SSB" and x < 10000000:
mode = "LSB"
elif mode == "CW":
mode = cfg.cw_mode
elif mode.startswith("FT"):
mode = cfg.ftx_mode
logging.debug(f"adjusted mode {mode}")
self.cat.set_mode(mode)
self.cat.set_vfo(x)
return self._response(True, "")
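
    # Illustrative only: a POTA spot frequency such as "14285" (kHz) becomes
    # 14285000.0 Hz for CAT control, and because that is above 10 MHz an
    # "SSB" spot is sent to the rig as "USB".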
def update_park_hunts_from_csv(self) -> str:
'''
Will use the current pota stats from hunter.csv to update the db with
new park hunt numbers. It will then update all the parks with data from
        the POTA API. This method will run for a while, depending on how many
        parks are in the csv file.
'''
ft = ('CSV files (*.csv;*.txt)', 'All files (*.*)')
filename = webview.windows[0] \
.create_file_dialog(
webview.OPEN_DIALOG,
file_types=ft)
if not filename:
return self._response(True, "user cancelled")
logging.info(f"updating park hunts from {filename[0]}")
stats = PotaStats(filename[0])
hunts = stats.get_all_hunts()
for park in hunts:
count = stats.get_park_hunt_count(park)
j = {'reference': park, 'hunts': count}
self.db.parks.update_park_hunts(j, count)
self.db.commit_session()
return self._update_all_parks()
def export_park_data(self) -> str:
'''
Dumps the entire parks table into a file named 'park_export.json'.
        This can later be used for import. This is useful to avoid having
to download park info from the POTA endpoints.
'''
logging.debug("export_park_data: dumping parks table...")
parks = self.db.parks.get_parks()
schema = ParkSchema()
data = schema.dumps(parks, many=True)
with open("park_export.json", "w") as out:
out.write(data)
return json.dumps({
'success': True,
'message': "park data exported successfully",
})
def import_park_data(self) -> str:
'''
Loads previously exported park data from a file into the parks table.
The opposite of :meth:`export_park_data`
'''
logging.debug("import_park_data: loading table...")
ft = ('JSON files (*.json)', 'All files (*.*)')
filename = webview.windows[0] \
.create_file_dialog(
webview.OPEN_DIALOG,
file_types=ft)
if not filename:
return json.dumps({'success': True, 'message': "user cancel"})
with open(filename[0], "r") as input:
text = input.read()
obj = json.loads(text)
self.db.parks.import_park_data(obj)
return json.dumps({
'success': True,
'message': "park data import successfully",
})
def _do_update(self):
'''
The main update method. Called on a timer
'''
logging.debug('updating db')
self.lock.acquire()
try:
json = self.pota.get_spots()
self.db.update_all_spots(json)
except ConnectionError as con_ex:
logging.warning("Connection error in do_update: ")
logging.exception(con_ex)
except Exception as ex:
logging.error("Unhandled error caught in do_update: ")
logging.error(type(ex).__name__)
logging.exception(ex)
finally:
self.lock.release()
def _update_all_parks(self) -> str:
logging.info("updating all parks in db")
parks = self.db.parks.get_parks()
for park in parks:
if park.name is not None:
continue
api_res = self.pota.get_park(park.reference)
self.db.parks.update_park_data(api_res) # delay_commit=True
time.sleep(0.001) # dont want to hurt POTA
return self._response(
True, "Park Data updated successfully", persist=True)
def _get_activator(self, callsign: str) -> Activator:
        '''
Gets the activator model from the db or pulls the data to create a
        new one or update an old one.
'''
def update():
logging.info("activator needs update from POTA API...")
id = self.update_activator_stats(callsign)
if id > 0:
activator = self.db.get_activator_by_id(id)
return activator
return None
ac = self.db.get_activator(callsign)
if (ac is None):
# not found pull new data
return update()
else:
# check timestamp
if (datetime.datetime.utcnow() - ac.updated > timedelta(days=1)):
return update()
return ac
def _response(self, success: bool, message: str, **kwargs) -> str:
'''
Returns a dumped json string from the given inputs.
:param bool success: indicates if response is pass or fail
:param str message: default message to return
:param any kwargs: any keyword arguments are included in the json
'''
return json.dumps({
'success': success,
'message': message,
**kwargs
})
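
    # Illustrative only: self._response(True, "spot posted", count=3) would
    # serialize to '{"success": true, "message": "spot posted", "count": 3}'
    # for the frontend to consume.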
def _get_win_size(self) -> tuple[int, int]:
'''
        Get the stored window size.
'''
cfg = self.db.get_user_config()
return (cfg.size_x, cfg.size_y)
def _get_win_pos(self) -> tuple[int, int]:
'''
        Get the stored window position.
'''
cfg = self.db.get_user_config()
return (cfg.pos_x, cfg.pos_y)
def _get_win_maximized(self) -> bool:
'''
        Get the stored window maximized state.
'''
cfg = self.db.get_user_config()
return cfg.is_max
def _store_win_size(self, size: tuple[int, int]):
'''
Save the window size to the database
'''
cfg = self.db.get_user_config()
cfg.size_x = size[0]
cfg.size_y = size[1]
self.db.commit_session()
def _store_win_pos(self, position: tuple[int, int]):
'''
Save the window position to the database
'''
cfg = self.db.get_user_config()
cfg.pos_x = position[0]
cfg.pos_y = position[1]
self.db.commit_session()
def _store_win_maxi(self, is_max: bool):
cfg = self.db.get_user_config()
cfg.is_max = 1 if is_max else 0
self.db.commit_session()
| 18,592 | Python | .py | 454 | 31.101322 | 79 | 0.588369 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,978 | index.py | cwhelchel_hunterlog/src/index.py | import os
import threading
import webview
import logging
import platform
import argparse
from api import JsApi
# put filename='index.log' for deployment
logging.basicConfig(filename='index.log',
encoding='utf-8',
format='%(asctime)s = %(levelname)-7.7s [%(name)s]: %(message)s', # noqa E501
level=logging.DEBUG)
# logging.basicConfig(level=logging.DEBUG)
the_api = JsApi()
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--reset-win", action="store_true",
help="reset the window size and position to default")
def do_update():
logging.debug('updating db')
the_api._do_update()
# try:
# json = pota.get_spots()
# the_db.update_all_spots(json)
# except ConnectionError as con_ex:
# logging.warning("Connection error in do_update: ")
# logging.exception(con_ex)
# except Exception as ex:
# logging.error("Unhandled error caught in do_update: ")
# logging.error(type(ex).__name__)
# logging.exception(ex)
# raise
# first lets update our spots w/ api data
do_update()
def get_entrypoint():
def exists(path):
return os.path.exists(os.path.join(os.path.dirname(__file__), path))
if exists('../gui/index.html'): # unfrozen development
return '../gui/index.html'
if exists('../Resources/gui/index.html'): # frozen py2app
return '../Resources/gui/index.html'
if exists('./gui/index.html'):
return './gui/index.html'
raise Exception('No index.html found')
def set_interval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
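
# Hedged usage sketch for set_interval (hypothetical callback, not wired into
# the app): the decorated function runs on a daemon thread until the returned
# threading.Event is set.
#
#     @set_interval(5)
#     def heartbeat():
#         logging.debug("tick")
#
#     stopper = heartbeat()  # starts the background loop
#     stopper.set()          # stops it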
entry = get_entrypoint()
def refresh_frontend():
try:
if len(webview.windows) > 0:
js = 'window.pywebview.state.getSpots()'
logging.debug('refreshing spots in frontend: ' + js)
webview.windows[0].evaluate_js(js)
except Exception as ex:
logging.error("error in refresh_frontend")
logging.exception(ex)
raise
@set_interval(60)
def update_ticker():
logging.info("thread heartbeat")
do_update()
refresh_frontend()
def on_closing():
# this crashes on linux
sz = (window.width, window.height)
pos = (window.x, window.y)
logging.debug(f"close: saving winow data: {sz}")
the_api._store_win_size(sz)
the_api._store_win_pos(pos)
def on_maximized():
the_api._store_win_maxi(True)
def on_restore():
the_api._store_win_maxi(False)
if __name__ == '__main__':
args = parser.parse_args()
(width, height) = the_api._get_win_size()
(x, y) = the_api._get_win_pos()
maxi = the_api._get_win_maximized()
if args.reset_win:
logging.info('resetting window size and position to defaults')
(width, height) = (800, 600)
(x, y) = (0, 0)
logging.debug(f"load winow data: {width} x {height} - {maxi}")
webview.settings = {
'ALLOW_DOWNLOADS': False, # Allow file downloads
'ALLOW_FILE_URLS': True, # Allow access to file:// urls
# Open target=_blank links in an external browser
'OPEN_EXTERNAL_LINKS_IN_BROWSER': True,
# Automatically open devtools when `start(debug=True)`.
'OPEN_DEVTOOLS_IN_DEBUG': False,
}
window = webview.create_window(
'HUNTER LOG',
entry,
js_api=the_api,
maximized=maxi,
width=width,
height=height,
x=x,
y=y,
min_size=(800, 600),
text_select=True)
if platform.system() == 'Windows':
window.events.closing += on_closing
window.events.maximized += on_maximized
window.events.restored += on_restore
if platform.system() == "Linux":
webview.start(update_ticker, private_mode=False, debug=True, gui="gtk") # noqa E501
elif platform.system() == "Windows":
webview.start(update_ticker, private_mode=False, debug=True)
elif platform.system() == "Darwin":
webview.start(update_ticker, private_mode=False, debug=True)
| 4,493 | Python | .py | 122 | 29.852459 | 98 | 0.623326 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,979 | upgrades.py | cwhelchel_hunterlog/src/upgrades.py | from alembic_src import versions
import logging as L
# not having this in the file seemed to mess up logging to index.log
# in index.py. alembic issue?
logging = L.getLogger("upgrades")
def do_upgrade():
logging.info('upgrading to head')
versions.upgrade()
def get_version(verbose: bool = False):
logging.info('getting current db version')
return versions.current(verbose)
| 394 | Python | .py | 11 | 32.909091 | 68 | 0.759259 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,980 | cat_interface.py | cwhelchel_hunterlog/src/cat/cat_interface.py | """
K6GTE, CAT interface abstraction
Email: [email protected]
GPL V3
"""
import logging
import socket
import xmlrpc.client
if __name__ == "__main__":
print("I'm not the program you are looking for.")
logger = logging.getLogger("cat")
class CAT:
"""CAT control rigctld or flrig"""
def __init__(self, interface: str, host: str, port: int) -> None:
"""
        Computer Aided Transceiver abstraction class.
Offers a normalized rigctld or flrig interface.
Takes 3 inputs to setup the class.
A string defining the type of interface, either 'flrig' or 'rigctld'.
A string defining the host, example: 'localhost' or '127.0.0.1'
        An integer defining the network port used.
Commonly 12345 for flrig, or 4532 for rigctld.
Exposed methods are:
get_vfo()
get_mode()
get_power()
get_ptt()
set_vfo()
set_mode()
set_power()
A variable 'online' is set to True if no error was encountered,
otherwise False.
"""
self.server = None
self.rigctrlsocket = None
self.interface = interface.lower()
self.host = host
self.port = port
self.online = False
if self.interface == "flrig":
target = f"http://{host}:{port}"
logger.debug("%s", target)
self.server = xmlrpc.client.ServerProxy(target)
self.online = True
try:
ver = self.server.main.get_version()
logger.debug(ver)
except ConnectionRefusedError:
self.online = False
if self.interface == "rigctld":
self.__initialize_rigctrld()
def __initialize_rigctrld(self):
try:
self.rigctrlsocket = socket.socket()
self.rigctrlsocket.settimeout(0.5)
self.rigctrlsocket.connect((self.host, self.port))
logger.debug("Connected to rigctrld")
self.online = True
except ConnectionRefusedError as exception:
self.rigctrlsocket = None
self.online = False
logger.debug("%s", exception)
except TimeoutError as exception:
self.rigctrlsocket = None
self.online = False
logger.debug("%s", exception)
def get_vfo(self) -> str:
"""Poll the radio for current vfo using the interface"""
vfo = ""
if self.interface == "flrig":
vfo = self.__getvfo_flrig()
if self.interface == "rigctld":
vfo = self.__getvfo_rigctld()
if "RPRT -" in vfo:
vfo = ""
return vfo
def __getvfo_flrig(self) -> str:
"""Poll the radio using flrig"""
try:
self.online = True
return self.server.rig.get_vfo()
except ConnectionRefusedError as exception:
self.online = False
logger.debug("getvfo_flrig: %s", exception)
return ""
def __getvfo_rigctld(self) -> str:
"""Returns VFO freq returned from rigctld"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(b"\nf\n")
return self.rigctrlsocket.recv(1024).decode().strip()
except socket.error as exception:
self.online = False
logger.debug("getvfo_rigctld: %s", exception)
self.rigctrlsocket = None
return ""
self.__initialize_rigctrld()
return ""
def get_mode(self) -> str:
"""Returns the current mode filter width of the radio"""
mode = ""
if self.interface == "flrig":
mode = self.__getmode_flrig()
if self.interface == "rigctld":
mode = self.__getmode_rigctld()
return mode
def __getmode_flrig(self) -> str:
"""Returns mode via flrig"""
try:
self.online = True
return self.server.rig.get_mode()
except ConnectionRefusedError as exception:
self.online = False
logger.debug("%s", exception)
return ""
def __getmode_rigctld(self) -> str:
"""Returns mode vai rigctld"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(b"m\n")
mode = self.rigctrlsocket.recv(1024).decode()
mode = mode.strip().split()[0]
logger.debug("%s", mode)
return mode
except IndexError as exception:
logger.debug("%s", exception)
except socket.error as exception:
self.online = False
logger.debug("%s", exception)
self.rigctrlsocket = None
return ""
self.__initialize_rigctrld()
return ""
def get_bw(self):
"""Get current vfo bandwidth"""
if self.interface == "flrig":
return self.__getbw_flrig()
if self.interface == "rigctld":
return self.__getbw_rigctld()
return False
def __getbw_flrig(self):
"""ccc"""
try:
self.online = True
return self.server.rig.get_bw()
except ConnectionRefusedError as exception:
self.online = False
logger.debug("getbw_flrig: %s", exception)
return ""
def __getbw_rigctld(self):
"""ccc"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(b"m\n")
mode = self.rigctrlsocket.recv(1024).decode()
mode = mode.strip().split()[1]
logger.debug("%s", mode)
return mode
except IndexError as exception:
logger.debug("%s", exception)
except socket.error as exception:
self.online = False
logger.debug("%s", exception)
self.rigctrlsocket = None
return ""
self.__initialize_rigctrld()
return ""
def get_power(self):
"""Get power level from rig"""
if self.interface == "flrig":
return self.__getpower_flrig()
if self.interface == "rigctld":
return self.__getpower_rigctld()
return False
def __getpower_flrig(self):
try:
self.online = True
return self.server.rig.get_power()
except ConnectionRefusedError as exception:
self.online = False
logger.debug("getpower_flrig: %s", exception)
return ""
def __getpower_rigctld(self):
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(b"l RFPOWER\n")
return int(float(self.rigctrlsocket.recv(1024).decode().strip()) * 100)
except socket.error as exception:
self.online = False
logger.debug("getpower_rigctld: %s", exception)
self.rigctrlsocket = None
return ""
def get_ptt(self):
"""Get PTT state"""
if self.interface == "flrig":
return self.__getptt_flrig()
if self.interface == "rigctld":
return self.__getptt_rigctld()
return False
def __getptt_flrig(self):
"""Returns ptt state via flrig"""
try:
self.online = True
return self.server.rig.get_ptt()
except ConnectionRefusedError as exception:
self.online = False
logger.debug("%s", exception)
return "0"
def __getptt_rigctld(self):
"""Returns ptt state via rigctld"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(b"t\n")
ptt = self.rigctrlsocket.recv(1024).decode()
logger.debug("%s", ptt)
ptt = ptt.strip()
return ptt
except socket.error as exception:
self.online = False
logger.debug("%s", exception)
self.rigctrlsocket = None
return "0"
def set_vfo(self, freq: str) -> bool:
"""Sets the radios vfo"""
if self.interface == "flrig":
return self.__setvfo_flrig(freq)
if self.interface == "rigctld":
return self.__setvfo_rigctld(freq)
return False
def __setvfo_flrig(self, freq: str) -> bool:
"""Sets the radios vfo"""
try:
self.online = True
return self.server.rig.set_frequency(float(freq))
except ConnectionRefusedError as exception:
self.online = False
logger.debug("setvfo_flrig: %s", exception)
return False
def __setvfo_rigctld(self, freq: str) -> bool:
"""sets the radios vfo"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(bytes(f"F {freq}\n", "utf-8"))
_ = self.rigctrlsocket.recv(1024).decode().strip()
return True
except socket.error as exception:
self.online = False
logger.debug("setvfo_rigctld: %s", exception)
self.rigctrlsocket = None
return False
self.__initialize_rigctrld()
return False
def set_mode(self, mode: str) -> bool:
"""Sets the radios mode"""
if self.interface == "flrig":
return self.__setmode_flrig(mode)
if self.interface == "rigctld":
return self.__setmode_rigctld(mode)
return False
def __setmode_flrig(self, mode: str) -> bool:
"""Sets the radios mode"""
try:
self.online = True
return self.server.rig.set_mode(mode)
except ConnectionRefusedError as exception:
self.online = False
logger.debug("setmode_flrig: %s", exception)
return False
def __setmode_rigctld(self, mode: str) -> bool:
"""sets the radios mode"""
if self.rigctrlsocket:
try:
self.online = True
self.rigctrlsocket.send(bytes(f"M {mode} 0\n", "utf-8"))
_ = self.rigctrlsocket.recv(1024).decode().strip()
return True
except socket.error as exception:
self.online = False
logger.debug("setmode_rigctld: %s", exception)
self.rigctrlsocket = None
return False
self.__initialize_rigctrld()
return False
def set_power(self, power):
"""Sets the radios power"""
if self.interface == "flrig":
return self.__setpower_flrig(power)
if self.interface == "rigctld":
return self.__setpower_rigctld(power)
return False
def __setpower_flrig(self, power):
try:
self.online = True
return self.server.rig.set_power(power)
except ConnectionRefusedError as exception:
self.online = False
logger.debug("setmode_flrig: %s", exception)
return False
def __setpower_rigctld(self, power):
if power.isnumeric() and int(power) >= 1 and int(power) <= 100:
rig_cmd = bytes(f"L RFPOWER {str(float(power) / 100)}\n", "utf-8")
try:
self.online = True
self.rigctrlsocket.send(rig_cmd)
_ = self.rigctrlsocket.recv(1024).decode().strip()
except socket.error:
self.online = False
self.rigctrlsocket = None
def ptt_on(self):
"""turn ptt on/off"""
if self.interface == "flrig":
return self.__ptt_on_flrig()
if self.interface == "rigctld":
return self.__ptt_on_rigctld()
return False
def __ptt_on_rigctld(self):
"""Toggle PTT state on"""
# T, set_ptt 'PTT'
# Set 'PTT'.
# PTT is a value: ‘0’ (RX), ‘1’ (TX), ‘2’ (TX mic), or ‘3’ (TX data).
# t, get_ptt
# Get 'PTT' status.
# Returns PTT as a value in set_ptt above.
rig_cmd = bytes("T 1\n", "utf-8")
logger.debug("%s", f"{rig_cmd}")
try:
self.online = True
self.rigctrlsocket.send(rig_cmd)
_ = self.rigctrlsocket.recv(1024).decode().strip()
except socket.error:
self.online = False
self.rigctrlsocket = None
def __ptt_on_flrig(self):
"""Toggle PTT state on"""
try:
self.online = True
return self.server.rig.set_ptt(1)
except ConnectionRefusedError as exception:
self.online = False
logger.debug("%s", exception)
return "0"
def ptt_off(self):
"""turn ptt on/off"""
if self.interface == "flrig":
return self.__ptt_off_flrig()
if self.interface == "rigctld":
return self.__ptt_off_rigctld()
return False
def __ptt_off_rigctld(self):
"""Toggle PTT state off"""
rig_cmd = bytes("T 0\n", "utf-8")
logger.debug("%s", f"{rig_cmd}")
try:
self.online = True
self.rigctrlsocket.send(rig_cmd)
_ = self.rigctrlsocket.recv(1024).decode().strip()
except socket.error:
self.online = False
self.rigctrlsocket = None
def __ptt_off_flrig(self):
"""Toggle PTT state off"""
try:
self.online = True
return self.server.rig.set_ptt(0)
except ConnectionRefusedError as exception:
self.online = False
logger.debug("%s", exception)
return "0"
| 13,883 | Python | .py | 374 | 25.890374 | 87 | 0.542749 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,981 | omnirig_interface.py | cwhelchel_hunterlog/src/cat/omnirig_interface.py | """
KK7JXG simple omnirig CAT control
email:[email protected]
GPL V3
"""
# pyright: ignore[reportOptionalMemberAccess]
import logging
import win32com.client as win32 # pylint: disable=import-error
class OmniRigClient:
"""OmniRig CAT control"""
def __init__(self, rig: int) -> None:
"""
@barryshaffer KK7JXG
My CAT class using Omnirig
        will attempt to create in a fashion that can be independently tested
then injected in K6GTE's cat_interface.py
Takes 1 input to setup the class.
        An integer defining which rig to control: 1 = 'rig1', 2 = 'rig2'.
Exposed methods are:
set_vfo()
set_mode()
get_vfo()
get_bw()
A variable 'online' is set to True if no error was encountered,
otherwise False.
"""
self.rig = rig
self.online = False
self.omnirig_object = None
try:
self.omnirig_object = win32.gencache.EnsureDispatch("OmniRig.OmniRigX")
logging.debug("Connected to Omnirig")
self.online = True
except: # noqa E722 # pylint: disable=bare-except
self.online = False
logging.debug("Omnirig connection failed")
def set_vfo(self, freq: str) -> bool:
"""Sets the radios vfo"""
if self.rig == 1:
self.omnirig_object.Rig1.SetSimplexMode(int(freq))
return True
if self.rig == 2:
self.omnirig_object.Rig2.SetSimplexMode(int(freq))
return True
return False
def set_mode(self, mode: str) -> bool:
"""
        Sets the radios mode
Convert Mode to Omnirig param
"""
if mode == "CW":
omni_mode = 8388608 # CW-U Omnirig Param
elif mode == "USB":
omni_mode = 33554432 # USB Omnirig Param
else:
omni_mode = 67108864 # LSB Omnirig Param
if self.rig == 1:
self.omnirig_object.Rig1.Mode = omni_mode
return True
if self.rig == 2:
self.omnirig_object.Rig2.Mode = omni_mode
return True
return False
def get_vfo(self) -> int:
"""Returns the radios vfo"""
if self.rig == 1:
return self.omnirig_object.Rig1.Freq
if self.rig == 2:
return self.omnirig_object.Rig2.Freq
return False
    def get_bw(self) -> int:
        """Returns the radios bandwidth"""
        if self.rig == 1:
            rig = self.omnirig_object.Rig1
        elif self.rig == 2:
            rig = self.omnirig_object.Rig2
        else:
            return False
        mode = int(rig.Mode)
        # OmniRig mode params: 8388608/16777216 = CW-U/CW-L,
        # 33554432/67108864 = USB/LSB, 134217728/268435456 = DIG-U/DIG-L,
        # 536870912 = AM
        if mode in (8388608, 16777216):
            return 500
        elif mode in (33554432, 67108864, 134217728, 268435456):
            return 3000
        elif mode == 536870912:
            return 6000
        else:
            return 12000
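
# Hedged usage sketch (Windows only; assumes OmniRig is installed and rig 1
# is configured, with the frequency given in hertz):
#
#     rig = OmniRigClient(1)
#     if rig.online:
#         rig.set_mode("USB")
#         rig.set_vfo("14285000")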
| 3,317 | Python | .py | 93 | 25.311828 | 96 | 0.564328 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,982 | db.py | cwhelchel_hunterlog/src/db/db.py | import re
from typing import List
import logging as L
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from bands import Bands
from db.models.qsos import Qso
from db.models.activators import Activator, ActivatorSchema
from db.models.spot_comments import SpotComment, SpotCommentSchema
from db.models.spots import Spot, SpotSchema
from db.models.user_config import UserConfig, UserConfigSchema
from db.park_query import ParkQuery
from db.qso_query import QsoQuery
from db.loc_query import LocationQuery
from db.spot_query import SpotQuery
from utils.callsigns import get_basecall
import upgrades
Base = declarative_base()
logging = L.getLogger("db")
# show sql
# L.getLogger('sqlalchemy.engine').setLevel(L.INFO)
VER_FROM_ALEMBIC = 'f01009b22b92'
'''
This value indicates the version of the DB schema the app is made for.
TODO: UPDATE THIS VERSION WHENEVER AN ALEMBIC MIGRATION IS CREATED. This is
typically done by running `alembic revision` in the root of the project.
'''
class InitQuery:
'''Internal DB queries stored here.'''
def __init__(self, session: scoped_session):
self.session = session
def init_config(self):
current = self.session.query(UserConfig).first()
if current is None:
cs = UserConfigSchema()
logging.debug("creating default user config...")
s = {'my_call': "N0CALL",
'my_grid6': 'FN31pr',
'default_pwr': 1500,
'flr_host': "127.0.0.1",
'flr_port': 12345,
'adif_host': "127.0.0.1",
'adif_port': 12345}
default_config = cs.load(s, session=self.session)
self.session.add(default_config)
self.session.commit()
def init_alembic_ver(self):
v = VER_FROM_ALEMBIC
table_exists = self._check_for_table()
if not table_exists:
self.session.execute(sa.text('CREATE TABLE alembic_version(version_num varchar(32) NOT NULL);')) # noqa E501
self.session.execute(sa.text(f"INSERT INTO alembic_version(version_num) VALUES ('{v}');")) # noqa E501
self.session.commit()
else:
# we need to read the vernum
sql = 'SELECT version_num FROM alembic_version'
v = self.session.execute(sa.text(sql))
db_ver = v.fetchone()[0]
            # after this, logging won't work for this execution.
if (db_ver != VER_FROM_ALEMBIC):
upgrades.do_upgrade()
def _check_for_table(self):
sql = """SELECT name FROM sqlite_master WHERE type='table' AND name='alembic_version';""" # noqa E501
r = self.session.execute(sa.text(sql))
return len(r.all()) > 0
class DataBase:
def __init__(self):
engine = sa.create_engine("sqlite:///spots.db", poolclass=sa.NullPool)
self.session = scoped_session(sessionmaker(bind=engine))
Base.metadata.create_all(engine)
self._iq = InitQuery(self.session)
self._lq = LocationQuery(self.session)
self._qq = QsoQuery(self.session)
self._pq = ParkQuery(self.session)
self._sq = SpotQuery(self.session, func=self._get_all_filters)
# do this FIRST. will upgrade the db to latest schema
self._iq.init_alembic_ver()
self._sq.delete_all_spots()
self._iq.init_config()
self.band_filter = Bands.NOBAND
self.region_filter = None
self.location_filter = None
self.qrt_filter_on = True # filter out QRT spots by default
self.hunted_filter_on = False # filter out spots you hunted
self.only_new_on = False # filter out parks you have never worked
def commit_session(self):
'''
Calls session.commit to save any pending changes to db.
May be required when for methods that use `delay_commit` param
'''
self.session.commit()
'''
    These properties expose query helpers that were refactored out of this
    class. If a method remains here, assume it is too integrated with other
    parts to be refactored out easily.
'''
@property
def qsos(self) -> QsoQuery:
return self._qq
@property
def parks(self) -> ParkQuery:
return self._pq
@property
def spots(self) -> SpotQuery:
return self._sq
@property
def locations(self) -> LocationQuery:
return self._lq
def update_all_spots(self, spots_json):
'''
Updates all the spots in the database.
First will delete all previous spots, read the ones passed in
and perform the logic to update meta info about the spots
:param dict spots_json: the dict from the pota api
'''
schema = SpotSchema()
self.session.execute(sa.text('DELETE FROM spots;'))
self.session.execute(sa.text('DELETE FROM comments;'))
# self._sq.insert_test_spot() # testing code
for s in spots_json:
to_add: Spot = schema.load(s, session=self.session)
self.session.add(to_add)
# get meta data for this spot
park = self.parks.get_park(to_add.reference)
if park is not None and park.hunts > 0:
to_add.park_hunts = park.hunts
else:
to_add.park_hunts = 0
count = self.qsos.get_op_qso_count(to_add.activator)
to_add.op_hunts = count
hunted = self.qsos.get_spot_hunted_flag(
to_add.activator, to_add.frequency, to_add.reference)
bands = self.qsos.get_spot_hunted_bands(
to_add.activator, to_add.reference)
to_add.hunted = hunted
to_add.hunted_bands = bands
# if park is not None:
if ',' not in to_add.locationDesc:
x, y = self._lq.get_location_hunts(to_add.locationDesc)
# logging.debug(f"got location hunts {x} / {y}")
to_add.loc_hunts = x
to_add.loc_total = y
to_add.is_qrt = False
if to_add.comments is not None:
if re.match(r'.*qrt.*', to_add.comments.lower()):
to_add.is_qrt = True
self.session.commit()
def update_activator_stat(self, activator_stat_json) -> int:
schema = ActivatorSchema()
x = self.get_activator(activator_stat_json['callsign'])
if x is None:
to_add = schema.load(activator_stat_json, session=self.session)
self.session.add(to_add)
x = to_add
else:
# logging.debug(f"updating activator {x.activator_id}")
schema.load(activator_stat_json, session=self.session, instance=x)
self.session.commit()
return x.activator_id
def get_spot_comments(self, activator, park: str) -> List[SpotComment]:
return self.session.query(SpotComment) \
.filter(SpotComment.activator == activator,
SpotComment.park == park) \
.order_by(SpotComment.spotTime.desc()) \
.all()
def get_activator(self, callsign: str) -> Activator:
basecall = get_basecall(callsign)
logging.debug(f"get_activator() basecall {basecall}")
return self.session.query(Activator) \
.filter(Activator.callsign == basecall) \
.first()
def get_activator_name(self, callsign: str) -> str:
act = self.get_activator(callsign)
if act is None:
return ""
return act.name
def get_activator_by_id(self, id: int) -> Activator:
return self.session.query(Activator).get(id)
def get_user_config(self) -> UserConfig:
return self.session.query(UserConfig).first()
def get_version(self) -> str:
sql = 'SELECT version_num FROM alembic_version'
v = self.session.execute(sa.text(sql))
return v.fetchone()[0]
def insert_spot_comments(self,
activator: str,
park: str,
comments: any):
# TESTING. leave out for now. maybe add back. seems we can leave
# comments in place and it doesn't matter if it tries to add more...
# sql = sa.text(f"DELETE FROM comments WHERE activator='{activator}' AND park='{park}' ;") # noqa E501
# self.session.execute(sql)
# self.session.commit()
if comments is None:
return
for x in comments:
x["activator"] = activator
x["park"] = park
# logging.debug(f"inserting {comments}")
ss = SpotCommentSchema(many=True)
to_add = ss.load(comments, session=self.session)
self.session.add_all(to_add)
self.session.commit()
# grab more info from spot comments
self._sq._update_comment_metadata(activator, park)
def update_user_config(self, json: any):
schema = UserConfigSchema()
config = self.get_user_config()
schema.load(json, session=self.session, instance=config)
self.session.commit()
def build_qso_from_spot(self, spot_id: int) -> Qso:
'''
Builds a new `Qso` with data in the spot table.
Also uses data from Activators table.
:param int spot_id: the spot PK.
:returns an untracked `Qso` object with initialized data.
'''
s = self.spots.get_spot(spot_id)
if (s is None):
q = Qso()
return q
a = self.get_activator(s.activator)
name = a.name if a is not None else ""
q = Qso()
q.init_from_spot(s, name)
return q
def set_band_filter(self, band: Bands):
logging.debug(f"db setting band filter to {band}")
self.band_filter = band
def set_region_filter(self, region: str):
logging.debug(f"db setting region filter to {region}")
self.region_filter = region
def set_location_filter(self, location: str):
logging.debug(f"db setting location filter to {location}")
self.location_filter = location
def set_qrt_filter(self, is_on: bool):
logging.debug(f"db setting QRT filter to {is_on}")
self.qrt_filter_on = is_on
def set_hunted_filter(self, is_on: bool):
logging.debug(f"db setting hunted filter to {is_on}")
self.hunted_filter_on = is_on
def set_only_new_filter(self, is_on: bool):
logging.debug(f"db setting ATNO filter to {is_on}")
self.only_new_on = is_on
def _get_all_filters(self) -> list[sa.ColumnElement[bool]]:
return self._get_band_filters() + \
self._get_region_filters() + \
self._get_location_filters() + \
self._get_qrt_filter() + \
self._get_hunted_filter() + \
self._get_only_new_filter()
def _get_band_filters(self) -> list[sa.ColumnElement[bool]]:
band = Bands(self.band_filter) # not sure why cast is needed
if band == Bands.NOBAND:
return []
terms = QsoQuery.get_band_lmt_terms(band, Spot.frequency)
return terms
def _get_region_filters(self) -> list[sa.ColumnElement[bool]]:
region = self.region_filter
if (region is None):
return []
terms = [Spot.locationDesc.startswith(region)]
return terms
def _get_location_filters(self) -> list[sa.ColumnElement[bool]]:
loc = self.location_filter
if (loc is None or loc == ''):
return []
terms = [Spot.locationDesc.contains(loc)]
return terms
def _get_qrt_filter(self) -> list[sa.ColumnElement[bool]]:
qrt = self.qrt_filter_on
if qrt:
return [Spot.is_qrt == False] # noqa E712
terms = []
return terms
def _get_hunted_filter(self) -> list[sa.ColumnElement[bool]]:
hunt_filter = self.hunted_filter_on
if hunt_filter:
return [Spot.hunted == False] # noqa E712
terms = []
return terms
def _get_only_new_filter(self) -> list[sa.ColumnElement[bool]]:
new_filter = self.only_new_on
logging.debug(f'newfilter is {new_filter}')
if new_filter:
return [Spot.park_hunts == 0] # noqa E712
terms = []
return terms
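
# Illustrative only: SpotQuery combines the term lists built above roughly as
#
#     session.query(Spot).filter(sa.and_(*self._get_all_filters()))
#
# so a disabled filter contributes an empty list and therefore no WHERE term.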
| 12,397 | Python | .py | 289 | 33.49481 | 121 | 0.612769 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,983 | utc.py | cwhelchel_hunterlog/src/db/utc.py | from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
inherit_cache = True
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
@compiles(utcnow, 'sqlite')
def sl_utcnow(element, compiler, **kw):
return "CURRENT_TIMESTAMP"
| 530 | Python | .py | 15 | 32.466667 | 47 | 0.763314 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,984 | loc_query.py | cwhelchel_hunterlog/src/db/loc_query.py | import sqlalchemy as sa
from sqlalchemy.orm import scoped_session
from db.models.location import Location, LocationSchema
from db.models.parks import Park
from db.models.qsos import Qso
import logging as L
logging = L.getLogger('location_query')
class LocationQuery:
'''Internal DB queries stored here.'''
def __init__(self, session: scoped_session):
self.session = session
def load_location_data(self, data: dict):
'''
Load data location data from the the POTA api into the database.
:param dict data: the dict of json data.
'''
logging.debug("load_location_data. entry")
ls = LocationSchema()
self.clear_locations()
for program in data:
prog_id = program['programId']
for entity in program['entities']:
entity_id = entity['entityId']
for location in entity['locations']:
loc: Location = ls.load(location, session=self.session)
loc.entityId = entity_id
loc.programId = prog_id
self.session.add(loc)
self.session.commit()
def get_location(self, locationId: int) -> Location:
return self.session.query(Location).get(locationId)
def get_location_by_desc(self, descriptor: str) -> Location:
'''
Given the Location descriptor ("US-AK", "CA-MB"), return a location
'''
return self.session.query(Location) \
.filter(Location.descriptor == descriptor) \
.first()
def get_location_hunts(self, descriptor: str) -> tuple[int, int]:
'''
For a location, returns the number of parks hunted and the total number
of parks.
:param str descriptor: location id (ex "US-AK", "CA-MB")
:returns tuple[0] = hunt count; tuple[1] = total park number
'''
loc = self.get_location_by_desc(descriptor)
if loc is None:
return (0, 0)
total = loc.parks
hunts = self.session.query(Park.reference).distinct() \
.join(Qso, Park.reference == Qso.sig_info) \
.join(Location, sa.and_(Location.descriptor.contains(descriptor),
Park.locationDesc.contains(descriptor))) \
.count()
return (hunts, total)
def clear_locations(self):
self.session.execute(sa.text("DELETE FROM LOCATIONS;"))
self.session.commit()
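
# Hedged usage sketch (assumes load_location_data has already populated the
# locations table from the POTA locations endpoint):
#
#     lq = LocationQuery(session)
#     hunted, total = lq.get_location_hunts("US-AK")
#     print(f"US-AK: {hunted}/{total} parks hunted")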
| 2,481 | Python | .py | 58 | 32.931034 | 79 | 0.610557 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,985 | qso_query.py | cwhelchel_hunterlog/src/db/qso_query.py | from datetime import datetime
import logging
from typing import List
import sqlalchemy as sa
from sqlalchemy.orm import scoped_session
from db.models.qsos import Qso
from bands import Bands, get_band, bandLimits, bandNames
class QsoQuery:
'''Store Queries for the QSO table here.'''
def __init__(self, session: scoped_session):
self.session = session
def insert_qso(self, qso: Qso, delay_commit: bool = True):
self.session.add(qso)
if not delay_commit:
self.session.commit()
def insert_new_qso(self, qso: any) -> int:
'''
Logs the QSO passed in from UI.
:param any qso: json from the frontend.
'''
def trim_z(input: str):
temp: str = input
if temp.endswith('Z'):
# fromisoformat doesn't like trailing Z
temp = temp[:-1]
return temp
# passing in the QSO object from init_from_spot
# doesn't seem to ever work. recreate a QSO object
# and add it directly
logging.debug(f"inserting qso: {qso}")
q = Qso()
q.call = qso['call']
q.rst_sent = qso['rst_sent']
q.rst_recv = qso['rst_recv']
q.freq = qso['freq']
q.freq_rx = qso['freq_rx']
q.mode = qso['mode']
q.comment = qso['comment']
temp: str = trim_z(qso['qso_date'])
q.qso_date = datetime.fromisoformat(temp)
temp: str = trim_z(qso['time_on'])
q.time_on = datetime.fromisoformat(temp)
q.tx_pwr = qso['tx_pwr']
q.rx_pwr = qso['rx_pwr']
q.gridsquare = qso['gridsquare']
q.state = qso['state']
q.sig = qso['sig']
q.sig_info = qso['sig_info']
q.distance = qso['distance']
q.bearing = qso['bearing']
q.from_app = True
q.cnfm_hunt = False
self.session.add(q)
self.session.commit()
return q.qso_id
def get_op_qso_count(self, call: str) -> int:
return self.session.query(Qso) \
.filter(Qso.call == call) \
.count()
def get_activator_hunts(self, callsign: str) -> int:
return self.session.query(Qso) \
.filter(Qso.call == callsign) \
.count()
def get_qso(self, id: int) -> Qso:
return self.session.query(Qso).get(id)
def get_qsos_from_app(self) -> List[Qso]:
x = self.session.query(Qso) \
.filter(Qso.from_app == True).all() # noqa E712
return x
def get_spot_hunted_flag(self,
activator: str,
freq: str,
ref: str) -> bool:
'''
Gets the flag indicating if a given spot has been hunted already today
:param str activator: activators callsign
:param str freq: frequency in MHz
:param str ref: the park reference (ex K-7465)
:returns true if the spot has already been hunted
'''
now = datetime.utcnow()
band = get_band(freq)
# logging.debug(f"using band {band} for freq {freq}")
if band is not None:
terms = QsoQuery.get_band_lmt_terms(band, Qso.freq)
else:
            terms = [sa.true()]  # no band restriction
flag = self.session.query(Qso) \
.filter(Qso.call == activator,
Qso.time_on > now.date(),
Qso.sig_info == ref,
sa.and_(*terms)) \
.count() > 0
return flag
def get_spot_hunted_bands(self, activator: str, ref: str) -> str:
'''
        Gets the string of all bands this spot has been hunted on today
:param str activator: activators callsign
:param str ref: park reference
:returns list of hunted bands for today
'''
now = datetime.utcnow()
result = ""
hunted_b = []
qsos = self.session.query(Qso) \
.filter(Qso.call == activator,
Qso.sig_info == ref,
Qso.time_on > now.date()) \
.all()
for q in qsos:
band = get_band(q.freq)
if band is None:
logging.warn(f"unknown band for freq {q.freq}")
else:
hunted_b.append(bandNames[band.value])
result = ",".join(hunted_b)
return result
@staticmethod
def get_band_lmt_terms(band: Bands, col: sa.Column) \
-> list[sa.ColumnElement[bool]]:
if band == Bands.NOBAND:
return []
ll = bandLimits[band][0]
ul = bandLimits[band][1]
terms = [sa.cast(col, sa.Float) < ul,
sa.cast(col, sa.Float) > ll]
return terms
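
# Illustrative only: for Bands.FOURTY the helper above yields terms equivalent
# to CAST(freq AS FLOAT) < 7300.0 AND CAST(freq AS FLOAT) > 7000.0, which a
# caller splats into a filter:
#
#     terms = QsoQuery.get_band_lmt_terms(Bands.FOURTY, Qso.freq)
#     session.query(Qso).filter(sa.and_(*terms))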
| 4,720 | Python | .py | 127 | 26.937008 | 78 | 0.543883 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,986 | spot_query.py | cwhelchel_hunterlog/src/db/spot_query.py | import datetime
from typing import Callable
import sqlalchemy as sa
from sqlalchemy.orm import scoped_session
import re
import logging as L
from db.models.spot_comments import SpotComment
from db.models.spots import Spot
logging = L.getLogger("spot_query")
class SpotQuery:
def __init__(self,
session: scoped_session,
func: Callable[[], list[sa.ColumnElement[bool]]]):
'''
Ctor for SpotQuery
:param scoped_session session: the db session object
:param func: callback function returning terms list for filter
'''
self.session = session
self._get_filters_cb = func
def delete_all_spots(self):
self.session.execute(sa.text('DELETE FROM spots;'))
self.session.commit()
def get_spots(self):
'''
Get all the spots after applying the current filters: band, region, and
QRT filters
'''
if self._get_filters_cb is None:
return None
terms = self._get_filters_cb()
x = self.session.query(Spot) \
.filter(sa.and_(*terms)) \
.all()
return x
def get_spot(self, id: int) -> Spot:
return self.session.query(Spot).get(id)
def get_spot_by_actx(self, activator: str, park: str) -> Spot:
return self.session.query(Spot) \
.filter(
sa.and_(Spot.activator == activator,
Spot.reference == park)) \
.first()
def insert_test_spot(self):
# test data
test = Spot()
test.activator = "N9FZ"
test.reference = "K-TEST"
test.grid6 = "FL31vt"
test.spotTime = datetime.datetime.utcnow()
test.spotter = "HUNTER-LOG"
test.mode = "CW"
test.locationDesc = "TC-TC"
test.latitude = "21.8022"
test.longitude = "-72.17"
test.name = "TEST"
test.parkName = "TEST"
test.comments = "A TEST SPOT FROM HL"
test.frequency = "7200"
test.hunted_bands = ""
test.is_qrt = False
test.hunted = False
self.session.add(test)
self.session.commit()
test_cmt = SpotComment()
test_cmt.activator = 'N9FZ'
test_cmt.spotId = test.spotId
test_cmt.spotter = 'W1AW'
test_cmt.frequency = '7200'
test_cmt.mode = 'CW'
test_cmt.park = 'K-TEST'
test_cmt.comments = "{this is a test} {With: N0CALL,W1AW}"
test_cmt.source = "test"
test_cmt.band = "40m"
test_cmt.spotTime = datetime.datetime.now()
self.session.add(test_cmt)
self.session.commit()
def _update_comment_metadata(self, activator: str, park: str):
logging.debug(f"_update_comment_metadata: {activator} at {park}")
wpm = r'^RBN \d+ dB (\d+) WPM.*'
spot = self.get_spot_by_actx(activator, park)
if spot is None:
return
act_comments = []
comments = self.session.query(SpotComment) \
.filter(
sa.and_(SpotComment.activator == activator,
SpotComment.park == park))\
.all()
for c in comments:
if c.source == "RBN" and c.mode == "CW":
m = re.match(wpm, c.comments)
if m and spot.cw_wpm is None:
# logging.debug(f"got wpm {m.group(1)}")
spot.cw_wpm = m.group(1)
if c.spotter == activator:
# logging.debug(f"appending activator cmt {c.comments}")
act_comments.append(c.comments)
spot.act_cmts = "|".join(act_comments)
self.session.commit()
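
# Illustrative only: the RBN regex above would pull "22" out of a comment like
# "RBN 21 dB 22 WPM CQ", recording the activator's CW speed on the spot row.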
| 3,687 | Python | .py | 100 | 27.18 | 79 | 0.567749 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,987 | __init__.py | cwhelchel_hunterlog/src/db/__init__.py | from .models.qsos import *
from .models.spot_comments import *
from .models.spots import *
from .models.user_config import *
from db.db import DataBase
| 153 | Python | .py | 5 | 29.4 | 35 | 0.795918 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,988 | park_query.py | cwhelchel_hunterlog/src/db/park_query.py | import logging
import sqlalchemy as sa
from sqlalchemy.orm import scoped_session
from db.models.parks import Park, ParkSchema
class ParkQuery:
def __init__(self, session: scoped_session):
self.session = session
def get_park(self, park: str) -> Park:
return self.session.query(Park) \
.filter(Park.reference == park) \
.first()
def get_parks(self) -> list[Park]:
return self.session.query(Park).all()
def insert_parks(self, parks: list[Park]):
self.session.add_all(parks)
self.session.commit()
def delete_parks(self):
self.session.execute(sa.text("DELETE FROM parks;"))
self.session.commit()
def import_park_data(self, json_obj: dict):
schema = ParkSchema()
data = schema.load(json_obj, session=self.session, many=True)
self.insert_parks(data)
def update_park_data(self, park: any, delay_commit: bool = False):
'''
Parks added from stats do not have anything besides hunt count and
the reference. This method updates the rest of the data.
:param any park: the json for a POTA park returned from POTA api
:param bool delay_commit: true to not commit the session
'''
if park is None:
return
schema = ParkSchema()
p = self.get_park(park['reference'])
if p is None:
logging.debug(f"inserting new {park['reference']}")
to_add: Park = schema.load(park, session=self.session)
# logging.debug(to_add)
self.session.add(to_add)
p = to_add
else:
logging.debug(f"updating data for for park {p.reference}")
p.name = park['name']
p.grid4 = park['grid4']
p.grid6 = park['grid6']
p.active = park['active']
p.latitude = park['latitude']
p.longitude = park['longitude']
p.parkComments = park['parkComments']
p.accessibility = park['accessibility']
p.sensitivity = park['sensitivity']
p.accessMethods = park['accessMethods']
p.activationMethods = park['activationMethods']
p.agencies = park['agencies']
p.agencyURLs = park['agencyURLs']
p.parkURLs = park['parkURLs']
p.parktypeId = park['parktypeId']
p.parktypeDesc = park['parktypeDesc']
p.locationDesc = park['locationDesc']
p.locationName = park['locationName']
p.entityId = park['entityId']
p.entityName = park['entityName']
p.referencePrefix = park['referencePrefix']
p.entityDeleted = park['entityDeleted']
p.firstActivator = park['firstActivator']
            p.firstActivationDate = park['firstActivationDate']
p.website = park['website']
if not delay_commit:
self.session.commit()
def inc_park_hunt(self, park: any):
'''
Increment the hunt count of a park by one. If park is not in db add it.
:param any park: the json for a POTA park returned from POTA api
'''
schema = ParkSchema()
if park is None:
# user logged something w/o a park
return
p = self.get_park(park['reference'])
if p is None:
logging.debug(f"adding new park row for {park['reference']}")
to_add: Park = schema.load(park, session=self.session)
to_add.hunts = 1
# logging.debug(to_add)
self.session.add(to_add)
p = to_add
else:
logging.debug(f"increment hunts for park {p.reference}")
p.hunts += 1
schema.load(park, session=self.session, instance=p)
self.session.commit()
def update_park_hunts(self, park: any, hunts: int,
delay_commit: bool = True):
'''
        Update the hunts field of a park in the db with the given hunts
        value. Will create a park row if none exists.
:param any park: park json/dic
:param int hunts: new hunts value
:param bool delay_commit: if true will not call session.commit
'''
schema = ParkSchema()
obj = self.get_park(park['reference'])
if obj is None:
# logging.debug(f"adding new park row for {park}")
# to_add: Park = schema.load(park, session=self.session)
to_add = Park()
to_add.reference = park['reference']
to_add.hunts = hunts
# logging.debug(to_add)
self.session.add(to_add)
obj = to_add
else:
# logging.debug(f"increment hunts for park {obj.reference}")
            # if this was hunted in the app and then the stats are imported,
            # this will overwrite and may clear previous hunts
obj.hunts = hunts
schema.load(park, session=self.session, instance=obj)
if not delay_commit:
self.session.commit()
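# Hedged usage sketch, not application code: exercises ParkQuery against a
# separate in-memory engine. The reference 'K-TEST' mirrors the test data
# used elsewhere in this repo.
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker
    from db.models.parks import Base
    _engine = sa.create_engine("sqlite:///:memory:")
    Base.metadata.create_all(_engine)
    _pq = ParkQuery(scoped_session(sessionmaker(bind=_engine)))
    _pq.update_park_hunts({'reference': 'K-TEST'}, hunts=1,
                          delay_commit=False)
    print(_pq.get_park('K-TEST'))  # -> <park(1:'K-TEST' 1 )>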
| 5,118 | Python | .py | 120 | 31.925 | 79 | 0.587503 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,989 | user_config.py | cwhelchel_hunterlog/src/db/models/user_config.py | from enum import Enum
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class UserConfig(Base):
__tablename__ = "config"
id = sa.Column(sa.Integer, primary_key=True)
my_call = sa.Column(sa.String)
my_grid6 = sa.Column(sa.String(6))
default_pwr = sa.Column(sa.Integer)
flr_host = sa.Column(sa.String)
flr_port = sa.Column(sa.Integer)
adif_host = sa.Column(sa.String)
adif_port = sa.Column(sa.Integer)
logger_type = sa.Column(sa.Integer, default=0)
size_x = sa.Column(sa.Integer, default=800)
size_y = sa.Column(sa.Integer, default=600)
is_max = sa.Column(sa.Boolean, default=False)
cw_mode = sa.Column(sa.String, default='CW')
ftx_mode = sa.Column(sa.String, default='USB')
qth_string = sa.Column(sa.String, nullable=True)
rig_if_type = sa.Column(sa.String, default="flrig")
pos_x = sa.Column(sa.Integer, default=0)
pos_y = sa.Column(sa.Integer, default=0)
class LoggerType(Enum):
Tcp = 0
UdpLog4om = 1
Aclog = 2
def __repr__(self):
return "<config({self.my_call!r}:{self.my_grid6!r})>" \
.format(self=self)
class UserConfigSchema(SQLAlchemyAutoSchema):
class Meta:
model = UserConfig
load_instance = True
Base.metadata.create_all(engine)
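# Illustrative only: build a config row and read the logger type the same
# way the ADIF logging code does (config.LoggerType.<name>.value). The call
# and grid are placeholders.
if __name__ == '__main__':
    _cfg = UserConfig(my_call="N0CALL", my_grid6="EM73",
                      logger_type=UserConfig.LoggerType.UdpLog4om.value)
    print(_cfg, _cfg.logger_type)  # -> <config('N0CALL':'EM73')> 1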
| 1,455 | Python | .py | 38 | 33.157895 | 63 | 0.683239 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,990 | qsos.py | cwhelchel_hunterlog/src/db/models/qsos.py | import datetime
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from db.models.spots import Spot
from db.utc import utcnow
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class Qso(Base):
__tablename__ = "qsos"
qso_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
call = sa.Column(sa.String)
name = sa.Column(sa.String)
state = sa.Column(sa.String) # kinda useless for multi-state parks
rst_sent = sa.Column(sa.String)
rst_recv = sa.Column(sa.String)
freq = sa.Column(sa.String)
freq_rx = sa.Column(sa.String)
mode = sa.Column(sa.String(15))
comment = sa.Column(sa.String)
qso_date = sa.Column(sa.Date)
time_on = sa.Column(sa.TIMESTAMP, server_default=utcnow())
tx_pwr = sa.Column(sa.Integer)
rx_pwr = sa.Column(sa.Integer)
gridsquare = sa.Column(sa.String(6))
distance = sa.Column(sa.Float, nullable=True)
bearing = sa.Column(sa.Float, nullable=True)
sig = sa.Column(sa.String)
sig_info = sa.Column(sa.String)
# custom app-only data:
from_app = sa.Column(sa.Boolean, nullable=True) # true if logged from app
cnfm_hunt = sa.Column(sa.Boolean, nullable=True)
    # 👆 true if confirmed via the hunter.csv import
def init_from_spot(self, spot: Spot, name: str):
rst = self.get_default_rst(spot.mode)
self.call = spot.activator
self.name = name
self.state = self.get_state(spot.locationDesc)
self.rst_sent = rst
self.rst_recv = rst
self.freq = spot.frequency
self.freq_rx = spot.frequency
self.mode = spot.mode
self.qso_date = spot.spotTime
self.gridsquare = spot.grid6
self.sig_info = spot.reference
self.sig = 'POTA' # todo support SOTA
def get_default_rst(self, mode: str) -> str:
if (mode in ["SSB", "PHONE"]):
return "59"
if (mode == "CW"):
return "599"
if (mode in ["FT8", "FT4", "DATA"]):
return "+00"
return ""
def get_state(self, locationDesc: str) -> str:
if not locationDesc or locationDesc == 'None': # None for k-test
return ''
x = locationDesc
if ',' in locationDesc:
# take the first one
x = locationDesc.split(',')[0]
pre, post = x.split('-')
if pre in ["US", "CA"]:
return post
return ''
def init_from_adif(self, adif: dict):
'''
        Init the fields from a dictionary of ADIF fields (see adif-io usage
        in utils). The QSO object created is assumed to be a POTA qso.
        There's a lot we don't import, namely any MY_ fields or Operator
        data: the configured user is assumed to be the "my" side of the QSO.
'''
f = float(adif['FREQ'] if 'FREQ' in adif.keys() else '-1.0')
fs = str(f * 1000) if f >= 0 else ''
qd = datetime.datetime(
int(adif['QSO_DATE'][:4]),
int(adif['QSO_DATE'][4:6]),
int(adif['QSO_DATE'][6:]))
qt = datetime.datetime(
int(adif['QSO_DATE'][:4]),
int(adif['QSO_DATE'][4:6]),
int(adif['QSO_DATE'][6:]),
int(adif['TIME_ON'][:2]),
int(adif['TIME_ON'][2:4]),
int(adif['TIME_ON'][4:]))
self.call = adif['CALL']
self.name = adif['NAME'] if 'NAME' in adif.keys() else ''
self.state = adif['STATE'] if 'STATE' in adif.keys() else ''
self.rst_sent = adif['RST_SENT'] if 'RST_SENT' in adif.keys() else self.get_default_rst(adif['MODE']) # noqa E501
self.rst_recv = adif['RST_RCVD'] if 'RST_RCVD' in adif.keys() else self.get_default_rst(adif['MODE']) # noqa E501
self.freq = fs
self.freq_rx = fs
self.mode = adif['MODE']
self.comment = adif['COMMENT'] if 'COMMENT' in adif.keys() else ''
self.qso_date = qd
self.time_on = qt
self.gridsquare = adif['GRIDSQUARE'] if 'GRIDSQUARE' in adif.keys() else '' # noqa: E501
self.sig_info = adif['SIG_INFO'] if 'SIG_INFO' in adif.keys() else ''
        # if we're importing from adif we may have a SIG_INFO with no SIG; if
        # so, go ahead and fix it (the import checks look for a valid POTA
        # park reference format)
self.sig = adif['SIG'] if 'SIG' in adif.keys() else 'POTA'
self.tx_pwr = adif['TX_PWR'] if 'TX_PWR' in adif.keys() else ''
self.from_app = False
self.cnfm_hunt = True
# print(self)
def __repr__(self):
return "<qso({self.qso_id!r}:{self.call!r} on {self.qso_date!r})>" \
.format(self=self)
class QsoSchema(SQLAlchemyAutoSchema):
class Meta:
model = Qso
load_instance = True
Base.metadata.create_all(engine)
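# Small illustrative checks of the helpers above; inputs are assumptions
# matching the formats the methods expect.
if __name__ == '__main__':
    _q = Qso()
    print(_q.get_default_rst("CW"))     # -> 599
    print(_q.get_state("US-GA,US-TN"))  # -> GA (first location wins)
    print(_q.get_state("None"))         # -> '' (placeholder handled)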
| 4,848 | Python | .py | 114 | 34.570175 | 122 | 0.598345 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,991 | location.py | cwhelchel_hunterlog/src/db/models/location.py | import sqlalchemy as sa
import marshmallow as ma
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class Location(Base):
__tablename__ = "locations"
    # maps to the JSON for a location from https://api.pota.app/locations/
    # a location is the level directly above a park
locationId = sa.Column(sa.Integer, primary_key=True)
descriptor = sa.Column(sa.String)
name = sa.Column(sa.String)
latitude = sa.Column(sa.Float)
longitude = sa.Column(sa.Float)
parks = sa.Column(sa.Integer)
# could be FKs but nah maybe a lookup from RAW JSON
entityId = sa.Column(sa.Integer)
programId = sa.Column(sa.Integer)
def __repr__(self):
return "<location(id={self.locationId!r})>".format(self=self)
class LocationSchema(SQLAlchemyAutoSchema):
class Meta:
model = Location
load_instance = True
unknown = ma.EXCLUDE
Base.metadata.create_all(engine)
| 1,060 | Python | .py | 27 | 34.62963 | 75 | 0.72434 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,992 | parks.py | cwhelchel_hunterlog/src/db/models/parks.py | import sqlalchemy as sa
import marshmallow as ma
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from db.utc import utcnow
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class Park(Base):
__tablename__ = "parks"
id = sa.Column(sa.Integer, primary_key=True)
reference = sa.Column(sa.String, nullable=False)
name = sa.Column(sa.String)
grid4 = sa.Column(sa.String(4))
grid6 = sa.Column(sa.String(6))
active = sa.Column(sa.Integer)
latitude = sa.Column(sa.Float, nullable=True)
longitude = sa.Column(sa.Float, nullable=True)
parkComments = sa.Column(sa.String)
accessibility = sa.Column(sa.String)
sensitivity = sa.Column(sa.String)
accessMethods = sa.Column(sa.String)
activationMethods = sa.Column(sa.String)
agencies = sa.Column(sa.String)
agencyURLs = sa.Column(sa.String)
parkURLs = sa.Column(sa.String)
parktypeId = sa.Column(sa.Integer)
parktypeDesc = sa.Column(sa.String) # full name is name + parktypeDesc
locationDesc = sa.Column(sa.String)
locationName = sa.Column(sa.String)
entityId = sa.Column(sa.Integer)
entityName = sa.Column(sa.String)
referencePrefix = sa.Column(sa.String)
entityDeleted = sa.Column(sa.Integer)
firstActivator = sa.Column(sa.String)
firstActivationDate = sa.Column(sa.String)
website = sa.Column(sa.String)
# meta and calculated data
hunts = sa.Column(sa.Integer, default=0)
last = sa.Column(sa.TIMESTAMP,
server_default=utcnow(),
onupdate=sa.func.current_timestamp())
def __repr__(self):
return "<park({self.id!r}:{self.reference!r} {self.hunts!r} )>" \
.format(self=self)
class ParkSchema(SQLAlchemyAutoSchema):
class Meta:
model = Park
load_instance = True
# there's a bunch we don't care about in the JSON from the API
unknown = ma.EXCLUDE
Base.metadata.create_all(engine)
| 2,039 | Python | .py | 51 | 34.333333 | 75 | 0.694992 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,993 | activators.py | cwhelchel_hunterlog/src/db/models/activators.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class Activator(Base):
__tablename__ = "activators"
activator_id = sa.Column(sa.Integer, primary_key=True)
callsign = sa.Column(sa.String)
name = sa.Column(sa.String)
qth = sa.Column(sa.String)
gravatar = sa.Column(sa.String)
activator = sa.Column(sa.JSON)
attempts = sa.Column(sa.JSON)
hunter = sa.Column(sa.JSON)
endorsements = sa.Column(sa.Integer)
awards = sa.Column(sa.Integer)
updated = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(),
onupdate=sa.func.current_timestamp())
def __repr__(self):
return "<activator(id={self.activator_id!r})>".format(self=self)
class ActivatorSchema(SQLAlchemyAutoSchema):
class Meta:
model = Activator
load_instance = True
Base.metadata.create_all(engine)
| 1,021 | Python | .py | 26 | 33.961538 | 72 | 0.70618 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,994 | spot_comments.py | cwhelchel_hunterlog/src/db/models/spot_comments.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class SpotComment(Base):
__tablename__ = "comments"
spotId = sa.Column(sa.Integer, primary_key=True)
spotTime = sa.Column(sa.DateTime)
spotter = sa.Column(sa.String)
mode = sa.Column(sa.String(15))
frequency = sa.Column(sa.String)
band = sa.Column(sa.String(15))
source = sa.Column(sa.String)
comments = sa.Column(sa.String)
activator = sa.Column(sa.String, nullable=True)
park = sa.Column(sa.String, nullable=True)
def __repr__(self):
return "<comment({self.spotId!r}:{self.comments!r})>".format(self=self)
class SpotCommentSchema(SQLAlchemyAutoSchema):
class Meta:
model = SpotComment
load_instance = True
Base.metadata.create_all(engine)
| 937 | Python | .py | 24 | 34.458333 | 79 | 0.72093 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,995 | spots.py | cwhelchel_hunterlog/src/db/models/spots.py | import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
Base = declarative_base()
engine = sa.create_engine("sqlite:///spots.db")
class Spot(Base):
__tablename__ = "spots"
spotId = sa.Column(sa.Integer, primary_key=True)
activator = sa.Column(sa.String)
frequency = sa.Column(sa.String)
mode = sa.Column(sa.String(15))
reference = sa.Column(sa.String(15))
parkName = sa.Column(sa.String, nullable=True)
spotTime = sa.Column(sa.DateTime)
spotter = sa.Column(sa.String())
comments = sa.Column(sa.String())
source = sa.Column(sa.String())
invalid = sa.Column(sa.Boolean, nullable=True)
name = sa.Column(sa.String())
locationDesc = sa.Column(sa.String)
grid4 = sa.Column(sa.String(4))
grid6 = sa.Column(sa.String(6))
latitude = sa.Column(sa.Float)
longitude = sa.Column(sa.Float)
count = sa.Column(sa.Integer())
expire = sa.Column(sa.Integer())
# meta data regarding this "activation" (activator+park+utcday) as it
# applies to this specific spot
hunted = sa.Column(sa.Boolean, nullable=True) # has this spot been hunted?
hunted_bands = sa.Column(sa.String, nullable=True) # list of bands hunted
# stats for this spot ie. park and op hunts
park_hunts = sa.Column(sa.Integer, nullable=True)
op_hunts = sa.Column(sa.Integer, nullable=True)
loc_hunts = sa.Column(sa.Integer, nullable=True)
loc_total = sa.Column(sa.Integer, nullable=True)
# to be calculated by app
is_qrt = sa.Column(sa.Boolean, nullable=True)
# pulled from spot comments
act_cmts = sa.Column(sa.String, nullable=True)
cw_wpm = sa.Column(sa.Integer, nullable=True)
def __repr__(self):
return "<spot(id={self.spotId!r})>".format(self=self)
class SpotSchema(SQLAlchemyAutoSchema):
class Meta:
model = Spot
load_instance = True
Base.metadata.create_all(engine)
| 1,980 | Python | .py | 47 | 37.297872 | 79 | 0.700156 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,996 | callsigns.py | cwhelchel_hunterlog/src/utils/callsigns.py |
def get_basecall(callsign: str) -> str:
'''
    Get the base component of a given callsign (i.e. the callsign without
    '/P' suffixes or country prefixes such as 'W4/').
'''
if callsign is None:
return ""
if "/" in callsign:
basecall = max(
callsign.split("/")[0],
callsign.split("/")[1],
key=len)
else:
basecall = callsign
return basecall
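# Illustrative examples with hypothetical callsigns:
if __name__ == '__main__':
    print(get_basecall("W4/G4ABC/P"))  # -> G4ABC (longest '/' component)
    print(get_basecall("N0CALL"))      # -> N0CALL (no '/' present)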
| 422 | Python | .py | 15 | 20.866667 | 77 | 0.558025 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,997 | distance.py | cwhelchel_hunterlog/src/utils/distance.py | '''
This file is basically taken directly from augratin project. thx
'''
from math import radians, sin, cos, asin, sqrt, atan2, pi
class Distance:
@staticmethod
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance in kilometers between two points
on the earth (specified in decimal degrees)
"""
# convert degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
aye = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
cee = 2 * asin(sqrt(aye))
arrgh = 6372.8 # Radius of earth in kilometers.
return cee * arrgh
@staticmethod
def grid_to_latlon(maiden):
"""
Converts a maidenhead gridsquare to a latitude longitude pair.
"""
maiden = str(maiden).strip().upper()
length = len(maiden)
        # note the parentheses: without them `not` binds only to the first
        # comparison; reject anything that is not an even-length grid of
        # 2 to 8 characters
        if not (8 >= length >= 2 and length % 2 == 0):
return 0, 0
lon = (ord(maiden[0]) - 65) * 20 - 180
lat = (ord(maiden[1]) - 65) * 10 - 90
if length >= 4:
lon += (ord(maiden[2]) - 48) * 2
lat += ord(maiden[3]) - 48
if length >= 6:
lon += (ord(maiden[4]) - 65) / 12 + 1 / 24
lat += (ord(maiden[5]) - 65) / 24 + 1 / 48
if length >= 8:
lon += (ord(maiden[6])) * 5.0 / 600
lat += (ord(maiden[7])) * 2.5 / 600
return lat, lon
@staticmethod
def distance(grid1: str, grid2: str) -> float:
"""
Takes two maidenhead gridsquares and returns the distance between the
two in kilometers.
"""
lat1, lon1 = Distance.grid_to_latlon(grid1)
lat2, lon2 = Distance.grid_to_latlon(grid2)
return round(Distance.haversine(lon1, lat1, lon2, lat2))
@staticmethod
def distance_miles(grid1: str, grid2: str) -> float:
"""
Takes two maidenhead gridsquares and returns the distance between the
two in miles.
"""
return round(Distance.distance(grid1, grid2) * 0.621371)
@staticmethod
def bearing(grid1: str, grid2: str) -> float:
"""
Takes two maidenhead gridsquares and returns the bearing from the first
to the second
"""
lat1, lon1 = Distance.grid_to_latlon(grid1)
lat2, lon2 = Distance.grid_to_latlon(grid2)
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
londelta = lon2 - lon1
why = sin(londelta) * cos(lat2)
exs = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(londelta)
brng = atan2(why, exs)
brng *= 180 / pi
if brng < 0:
brng += 360
return round(brng)
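# Rough sanity checks with two illustrative grid squares; expected values
# are approximate (both methods round their results).
if __name__ == '__main__':
    print(Distance.grid_to_latlon("EM73"))    # -> (33, -86)
    print(Distance.distance("EM73", "FN31"))  # ~1386 km
    print(Distance.bearing("EM73", "FN31"))   # ~47 degrees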
| 2,858 | Python | .py | 77 | 28.545455 | 79 | 0.562184 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,998 | adif.py | cwhelchel_hunterlog/src/utils/adif.py |
import datetime
import logging as L
import os
import socket
import bands
import adif_io
import re
from db.db import DataBase
from db.models.qsos import Qso
from db.models.user_config import UserConfig
from version import __version__
logging = L.getLogger("adif_log")
BACKUP_LOG_FN = "hunter.adi"
class AdifLog():
def __init__(self, filename: str = BACKUP_LOG_FN):
self.filename = filename
self._init_adif_log()
def log_qso_and_send(self, qso: Qso, config: UserConfig):
'''
        Logs the QSO to the ADIF file and sends it to the remote logger host
        (via UDP or TCP depending on the configured logger type).
'''
logging.debug(f"logging as {config.logger_type}")
if config.logger_type == config.LoggerType.Aclog.value:
type = socket.SOCK_STREAM
qso_adif = self._get_adif(qso, config.my_call, config.my_grid6)
# TODO: needs to be even more granular for ACLOG b/c there is a
# more feature rich version that can pull in more QSO data, send to
            # LOTW, QRZ, etc (it's from WD4DAN)
adif = f"<CMD><ADDADIFRECORD><VALUE>{qso_adif}</VALUE></CMD>"
elif config.logger_type == config.LoggerType.UdpLog4om.value:
type = socket.SOCK_DGRAM
adif = self._get_adif(qso, config.my_call, config.my_grid6)
elif config.logger_type == config.LoggerType.Tcp.value:
type = socket.SOCK_STREAM
adif = self._get_adif(qso, config.my_call, config.my_grid6)
self._send_msg(adif, config.adif_host, config.adif_port, type)
self.write_adif_log(adif)
def log_qso(self, qso: Qso, config: UserConfig):
'''
        Logs the QSO to the ADIF file.
'''
adif = self._get_adif(qso, config.my_call, config.my_grid6)
self.write_adif_log(adif)
def write_adif_log(self, adif):
with open(self.filename, "a", encoding='UTF-8') as file:
file.write(adif + "\n")
@staticmethod
def import_from_log(file_name: str, the_db: DataBase):
'''
Imports the ADIF records from the given file into the given Database.
:param str file_name: the path of the ADIF file to import.
:param DataBase the_db: the instance of the DataBase object to insert
qso records into.
'''
logging.info(f"importing adif from {file_name}")
pattern = r'([A-Z0-9]+-[0-9]*)'
if os.path.exists(file_name):
qsos, header = adif_io.read_from_file(file_name)
logging.debug(f"adif hdr {header}")
for qso in qsos:
q = Qso()
sig_check = ('SIG' in qso.keys() and qso['SIG'] == 'POTA')
sig_info_check = ('SIG_INFO' in qso.keys()
and re.match(pattern, qso["SIG_INFO"]))
if (sig_check or sig_info_check):
if not sig_info_check:
# we got pota sig but no sig_info
# check the comments
if 'COMMENT' in qso.keys():
m = re.findall(pattern, qso['COMMENT'])
                            if m:  # the comment may not contain a park ref
                                qso['SIG_INFO'] = m[0]
q.init_from_adif(qso)
the_db.qsos.insert_qso(q)
the_db.commit_session()
def _init_adif_log(self):
filename = self.filename
if not os.path.exists(filename):
with open(filename, "w", encoding='UTF-8') as f:
v = self._get_adif_field("programversion", __version__)
pid = self._get_adif_field("programid", "hunterlog")
f.write("HUNTER LOG backup log\n")
f.write(f"Created {datetime.datetime.now()}\n")
f.write(pid)
f.write(v)
f.write("<EOH>\n")
def _send_msg(self, msg: str, host: str, port: int, type: int):
"""
        Send an ADIF message to a remote endpoint over the given socket type.
"""
logging.debug(f"logging to {host}:{port} with data {msg}")
try:
with socket.socket(socket.AF_INET, type) as sock:
sock.connect((host, port))
sock.send(msg.encode())
except Exception as err:
logging.error("_send_msg exception:", err)
def _get_adif_field(self, field_name: str, field_data: str) -> str:
return f"<{field_name.upper()}:{len(field_data)}>{field_data}\n"
def _get_adif(self, qso: Qso, my_call: str, my_grid6: str) -> str:
band_name = bands.get_band_name(qso.freq)
# todo:
# self._get_adif_field("distance", qso.sig_info) +
# self._get_adif_field("STATE", qso.park_state) +
# silly but w/e
f: float = float(qso.freq) / 1000.0
fs = str(f)
q_date = qso.qso_date.strftime('%Y%m%d')
q_time_on = qso.time_on.strftime('%H%M%S')
state = qso.state if qso.state else ''
adif = self._get_adif_field("band", band_name) + \
self._get_adif_field("call", qso.call) + \
self._get_adif_field("name", qso.name if qso.name else '') + \
self._get_adif_field("comment", qso.comment) + \
self._get_adif_field("sig", qso.sig) + \
self._get_adif_field("sig_info", qso.sig_info) + \
self._get_adif_field("gridsquare", qso.gridsquare) + \
self._get_adif_field("state", state) + \
self._get_adif_field("distance", str(qso.distance)) + \
self._get_adif_field("ant_az", str(qso.bearing)) + \
self._get_adif_field("tx_pwr", str(qso.tx_pwr)) + \
self._get_adif_field("mode", qso.mode) + \
self._get_adif_field("operator", my_call) + \
self._get_adif_field("rst_rcvd", qso.rst_recv) + \
self._get_adif_field("rst_sent", qso.rst_sent) + \
self._get_adif_field("freq", fs) + \
self._get_adif_field("qso_date", q_date) + \
self._get_adif_field("time_on", q_time_on) + \
self._get_adif_field("my_gridsquare", my_grid6) + \
"<EOR>\n"
return adif
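# Hedged illustration of the ADIF field format emitted above. Writes a small
# header to the throwaway file 'demo.adi' as a side effect of construction.
if __name__ == '__main__':
    _log = AdifLog(filename="demo.adi")
    print(_log._get_adif_field("call", "W1AW"), end="")  # -> <CALL:4>W1AW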
| 6,143 | Python | .py | 131 | 35.625954 | 79 | 0.556854 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |
2,287,999 | __init__.py | cwhelchel_hunterlog/src/pota/__init__.py | from .pota import Api as PotaApi
from .stats import PotaStats | 61 | Python | .py | 2 | 30 | 32 | 0.833333 | cwhelchel/hunterlog | 8 | 0 | 4 | GPL-3.0 | 9/5/2024, 10:48:18 PM (Europe/Amsterdam) |