import torch
import torch.nn as nn
import torch.nn.functional as F

#"""The Attention Module is built by pre-activation Residual Unit [11] with the
#number of channels in each stage is the same as ResNet [10]."""

class PreActResidualUnit(nn.Module):
    """PreAct Residual Unit

    Args:
        in_channels: residual unit input channel number
        out_channels: residual unit output channel number
        stride: stride of residual unit; when stride = 2, downsample the feature map
    """

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()

        bottleneck_channels = int(out_channels / 4)
        self.residual_function = nn.Sequential(
            # 1x1 conv
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, bottleneck_channels, 1, stride),

            # 3x3 conv
            nn.BatchNorm2d(bottleneck_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1),

            # 1x1 conv
            nn.BatchNorm2d(bottleneck_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck_channels, out_channels, 1)
        )

        # Apply a 1x1 projection shortcut when the spatial size or the channel
        # count changes. (Fixed: the original tested `stride != 2`, which
        # projected even for plain identity blocks.)
        self.shortcut = nn.Sequential()
        if stride != 1 or (in_channels != out_channels):
            self.shortcut = nn.Conv2d(in_channels, out_channels, 1, stride=stride)

    def forward(self, x):
        res = self.residual_function(x)
        shortcut = self.shortcut(x)
        return res + shortcut


#"""The hyperparameter p denotes the number of preprocessing Residual
#Units before splitting into trunk branch and mask branch. t denotes
#the number of Residual Units in trunk branch. r denotes the number of
#Residual Units between adjacent pooling layer in the mask branch."""
class AttentionModule1(nn.Module):

    def __init__(self, in_channels, out_channels, p=1, t=2, r=1):
        super().__init__()
        assert in_channels == out_channels

        self.pre = self._make_residual(in_channels, out_channels, p)
        self.trunk = self._make_residual(in_channels, out_channels, t)
        self.soft_resdown1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown2 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown3 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown4 = self._make_residual(in_channels, out_channels, r)

        self.soft_resup1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup2 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup3 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup4 = self._make_residual(in_channels, out_channels, r)

        self.shortcut_short = PreActResidualUnit(in_channels, out_channels, 1)
        self.shortcut_long = PreActResidualUnit(in_channels, out_channels, 1)

        self.sigmoid = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.Sigmoid()
        )

        self.last = self._make_residual(in_channels, out_channels, p)

    def forward(self, x):
        ###We make the size of the smallest output map in each mask branch 7*7 to be consistent
        #with the smallest trunk output map size.
        ###Thus 3,2,1 max-pooling layers are used in mask branch with input size
        #56 * 56, 28 * 28, 14 * 14 respectively.
        x = self.pre(x)
        input_size = (x.size(2), x.size(3))

        x_t = self.trunk(x)

        # first downsample out 28
        x_s = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown1(x_s)

        # 28 shortcut
        shape1 = (x_s.size(2), x_s.size(3))
        shortcut_long = self.shortcut_long(x_s)

        # second downsample out 14
        # (fixed: pool the running mask tensor x_s, not the module input x)
        x_s = F.max_pool2d(x_s, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown2(x_s)

        # 14 shortcut
        # (fixed: use the dedicated shortcut unit; the original applied
        # soft_resdown3 here and left self.shortcut_short unused)
        shape2 = (x_s.size(2), x_s.size(3))
        shortcut_short = self.shortcut_short(x_s)

        # third downsample out 7
        x_s = F.max_pool2d(x_s, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown3(x_s)

        # mid
        x_s = self.soft_resdown4(x_s)
        x_s = self.soft_resup1(x_s)

        # first upsample out 14
        x_s = self.soft_resup2(x_s)
        x_s = F.interpolate(x_s, size=shape2)
        x_s += shortcut_short

        # second upsample out 28
        x_s = self.soft_resup3(x_s)
        x_s = F.interpolate(x_s, size=shape1)
        x_s += shortcut_long

        # third upsample out 56
        x_s = self.soft_resup4(x_s)
        x_s = F.interpolate(x_s, size=input_size)

        x_s = self.sigmoid(x_s)
        x = (1 + x_s) * x_t
        x = self.last(x)

        return x

    def _make_residual(self, in_channels, out_channels, p):
        layers = []
        for _ in range(p):
            layers.append(PreActResidualUnit(in_channels, out_channels, 1))
        return nn.Sequential(*layers)


class AttentionModule2(nn.Module):

    def __init__(self, in_channels, out_channels, p=1, t=2, r=1):
        super().__init__()
        #"""The hyperparameter p denotes the number of preprocessing Residual
        #Units before splitting into trunk branch and mask branch. t denotes
        #the number of Residual Units in trunk branch. r denotes the number of
        #Residual Units between adjacent pooling layer in the mask branch."""
        assert in_channels == out_channels

        self.pre = self._make_residual(in_channels, out_channels, p)
        self.trunk = self._make_residual(in_channels, out_channels, t)
        self.soft_resdown1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown2 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown3 = self._make_residual(in_channels, out_channels, r)

        self.soft_resup1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup2 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup3 = self._make_residual(in_channels, out_channels, r)

        self.shortcut = PreActResidualUnit(in_channels, out_channels, 1)

        self.sigmoid = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.Sigmoid()
        )

        self.last = self._make_residual(in_channels, out_channels, p)

    def forward(self, x):
        x = self.pre(x)
        input_size = (x.size(2), x.size(3))

        x_t = self.trunk(x)

        # first downsample out 14
        x_s = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown1(x_s)

        # 14 shortcut
        shape1 = (x_s.size(2), x_s.size(3))
        shortcut = self.shortcut(x_s)

        # second downsample out 7
        # (fixed: pool the running mask tensor x_s, not the module input x)
        x_s = F.max_pool2d(x_s, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown2(x_s)

        # mid
        x_s = self.soft_resdown3(x_s)
        x_s = self.soft_resup1(x_s)

        # first upsample out 14
        x_s = self.soft_resup2(x_s)
        x_s = F.interpolate(x_s, size=shape1)
        x_s += shortcut

        # second upsample out 28
        x_s = self.soft_resup3(x_s)
        x_s = F.interpolate(x_s, size=input_size)

        x_s = self.sigmoid(x_s)
        x = (1 + x_s) * x_t
        x = self.last(x)

        return x

    def _make_residual(self, in_channels, out_channels, p):
        layers = []
        for _ in range(p):
            layers.append(PreActResidualUnit(in_channels, out_channels, 1))
        return nn.Sequential(*layers)


class AttentionModule3(nn.Module):

    def __init__(self, in_channels, out_channels, p=1, t=2, r=1):
        super().__init__()
        assert in_channels == out_channels

        self.pre = self._make_residual(in_channels, out_channels, p)
        self.trunk = self._make_residual(in_channels, out_channels, t)
        self.soft_resdown1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resdown2 = self._make_residual(in_channels, out_channels, r)

        self.soft_resup1 = self._make_residual(in_channels, out_channels, r)
        self.soft_resup2 = self._make_residual(in_channels, out_channels, r)

        self.shortcut = PreActResidualUnit(in_channels, out_channels, 1)

        self.sigmoid = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.Sigmoid()
        )

        self.last = self._make_residual(in_channels, out_channels, p)

    def forward(self, x):
        x = self.pre(x)
        input_size = (x.size(2), x.size(3))

        x_t = self.trunk(x)

        # first downsample out 7
        x_s = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x_s = self.soft_resdown1(x_s)

        # mid
        x_s = self.soft_resdown2(x_s)
        x_s = self.soft_resup1(x_s)

        # first upsample out 14
        x_s = self.soft_resup2(x_s)
        x_s = F.interpolate(x_s, size=input_size)

        x_s = self.sigmoid(x_s)
        x = (1 + x_s) * x_t
        x = self.last(x)

        return x

    def _make_residual(self, in_channels, out_channels, p):
        layers = []
        for _ in range(p):
            layers.append(PreActResidualUnit(in_channels, out_channels, 1))
        return nn.Sequential(*layers)


class Attention(nn.Module):
    """residual attention network

    Args:
        block_num: attention module number for each stage
    """

    def __init__(self, block_num, class_num=100):
        super().__init__()
        self.pre_conv = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        self.stage1 = self._make_stage(64, 256, block_num[0], AttentionModule1)
        self.stage2 = self._make_stage(256, 512, block_num[1], AttentionModule2)
        self.stage3 = self._make_stage(512, 1024, block_num[2], AttentionModule3)
        self.stage4 = nn.Sequential(
            PreActResidualUnit(1024, 2048, 2),
            PreActResidualUnit(2048, 2048, 1),
            PreActResidualUnit(2048, 2048, 1)
        )
        self.avg = nn.AdaptiveAvgPool2d(1)
        # fixed: honor class_num instead of hard-coding 100
        self.linear = nn.Linear(2048, class_num)

    def forward(self, x):
        x = self.pre_conv(x)
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.avg(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x

    def _make_stage(self, in_channels, out_channels, num, block):
        layers = []
        layers.append(PreActResidualUnit(in_channels, out_channels, 2))

        for _ in range(num):
            layers.append(block(out_channels, out_channels))

        return nn.Sequential(*layers)


def attention56():
    return Attention([1, 1, 1])


def attention92():
    return Attention([1, 2, 3])
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/cifar100/models/attention.py
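A quick sanity check for the attention network above; this is an illustrative sketch that assumes the file's definitions are in scope and feeds a CIFAR-100-sized random batch:

# Illustrative smoke test: the stem keeps 32x32, the four stages halve the
# spatial size down to 2x2, and global pooling plus the linear head yields
# one logit per CIFAR-100 class.
net = attention56()
logits = net(torch.randn(2, 3, 32, 32))
assert logits.shape == (2, 100)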
import torch
import torch.nn as nn
import torch.nn.functional as F


def channel_split(x, split):
    """split a tensor into two pieces along channel dimension

    Args:
        x: input tensor
        split: (int) channel size for each piece
    """
    assert x.size(1) == split * 2
    return torch.split(x, split, dim=1)


def channel_shuffle(x, groups):
    """channel shuffle operation

    Args:
        x: input tensor
        groups: input branch number
    """
    batch_size, channels, height, width = x.size()
    channels_per_group = int(channels // groups)

    x = x.view(batch_size, groups, channels_per_group, height, width)
    x = x.transpose(1, 2).contiguous()
    x = x.view(batch_size, -1, height, width)

    return x


class ShuffleUnit(nn.Module):

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()

        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels

        if stride != 1 or in_channels != out_channels:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, in_channels, 1),
                nn.BatchNorm2d(in_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels),
                nn.BatchNorm2d(in_channels),
                nn.Conv2d(in_channels, int(out_channels / 2), 1),
                nn.BatchNorm2d(int(out_channels / 2)),
                nn.ReLU(inplace=True)
            )

            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels),
                nn.BatchNorm2d(in_channels),
                nn.Conv2d(in_channels, int(out_channels / 2), 1),
                nn.BatchNorm2d(int(out_channels / 2)),
                nn.ReLU(inplace=True)
            )
        else:
            self.shortcut = nn.Sequential()

            in_channels = int(in_channels / 2)
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, in_channels, 1),
                nn.BatchNorm2d(in_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels),
                nn.BatchNorm2d(in_channels),
                nn.Conv2d(in_channels, in_channels, 1),
                nn.BatchNorm2d(in_channels),
                nn.ReLU(inplace=True)
            )

    def forward(self, x):
        if self.stride == 1 and self.out_channels == self.in_channels:
            shortcut, residual = channel_split(x, int(self.in_channels / 2))
        else:
            shortcut = x
            residual = x

        shortcut = self.shortcut(shortcut)
        residual = self.residual(residual)
        x = torch.cat([shortcut, residual], dim=1)
        x = channel_shuffle(x, 2)

        return x


class ShuffleNetV2(nn.Module):

    def __init__(self, ratio=1, class_num=100):
        super().__init__()
        if ratio == 0.5:
            out_channels = [48, 96, 192, 1024]
        elif ratio == 1:
            out_channels = [116, 232, 464, 1024]
        elif ratio == 1.5:
            out_channels = [176, 352, 704, 1024]
        elif ratio == 2:
            out_channels = [244, 488, 976, 2048]
        else:
            # fixed: the original constructed the exception without raising it
            raise ValueError('unsupported ratio number')

        self.pre = nn.Sequential(
            nn.Conv2d(3, 24, 3, padding=1),
            nn.BatchNorm2d(24)
        )

        self.stage2 = self._make_stage(24, out_channels[0], 3)
        self.stage3 = self._make_stage(out_channels[0], out_channels[1], 7)
        self.stage4 = self._make_stage(out_channels[1], out_channels[2], 3)
        self.conv5 = nn.Sequential(
            nn.Conv2d(out_channels[2], out_channels[3], 1),
            nn.BatchNorm2d(out_channels[3]),
            nn.ReLU(inplace=True)
        )

        self.fc = nn.Linear(out_channels[3], class_num)

    def forward(self, x):
        x = self.pre(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = F.adaptive_avg_pool2d(x, 1)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x

    def _make_stage(self, in_channels, out_channels, repeat):
        layers = []
        layers.append(ShuffleUnit(in_channels, out_channels, 2))

        while repeat:
            layers.append(ShuffleUnit(out_channels, out_channels, 1))
            repeat -= 1

        return nn.Sequential(*layers)


def shufflenetv2():
    return ShuffleNetV2()
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/cifar100/models/shufflenetv2.py
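A small demonstration of the channel_shuffle operation defined above (illustrative; assumes the file's definitions are in scope):

# With groups=2, channels [0 1 2 3 4 5] are reordered to [0 3 1 4 2 5],
# interleaving the two branch halves so information mixes across branches.
x = torch.arange(6).float().view(1, 6, 1, 1)
y = channel_shuffle(x, 2)
print(y.view(-1).tolist())  # [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]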
all_models = [
    "vgg16", "vgg13", "vgg11", "vgg19",
    "densenet121", "densenet161", "densenet169", "densenet201",
    "googlenet",
    "inceptionv3", "inceptionv4", "inceptionresnetv2",
    "xception",
    "resnet18", "resnet34", "resnet50", "resnet101", "resnet152",
    "preactresnet18", "preactresnet34", "preactresnet50", "preactresnet101", "preactresnet152",
    "resnext50", "resnext101", "resnext152",
    "shufflenet", "shufflenetv2",
    "squeezenet",
    "mobilenet", "mobilenetv2",
    "nasnet",
    "attention56", "attention92",
    "seresnet18", "seresnet34", "seresnet50", "seresnet101", "seresnet152",
    "wideresnet",
    "stochasticdepth18", "stochasticdepth34", "stochasticdepth50", "stochasticdepth101",
]


def get_model(name: str):
    """Instantiate the designated DNN model."""
    if name == "vgg16":
        from models.vgg import vgg16_bn
        model = vgg16_bn()
    elif name == "vgg13":
        from models.vgg import vgg13_bn
        model = vgg13_bn()
    elif name == "vgg11":
        from models.vgg import vgg11_bn
        model = vgg11_bn()
    elif name == "vgg19":
        from models.vgg import vgg19_bn
        model = vgg19_bn()
    elif name == "densenet121":
        from models.densenet import densenet121
        model = densenet121()
    elif name == "densenet161":
        from models.densenet import densenet161
        model = densenet161()
    elif name == "densenet169":
        from models.densenet import densenet169
        model = densenet169()
    elif name == "densenet201":
        from models.densenet import densenet201
        model = densenet201()
    elif name == "googlenet":
        from models.googlenet import googlenet
        model = googlenet()
    elif name == "inceptionv3":
        from models.inceptionv3 import inceptionv3
        model = inceptionv3()
    elif name == "inceptionv4":
        from models.inceptionv4 import inceptionv4
        model = inceptionv4()
    elif name == "inceptionresnetv2":
        from models.inceptionv4 import inception_resnet_v2
        model = inception_resnet_v2()
    elif name == "xception":
        from models.xception import xception
        model = xception()
    elif name == "resnet18":
        from models.resnet import resnet18
        model = resnet18()
    elif name == "resnet34":
        from models.resnet import resnet34
        model = resnet34()
    elif name == "resnet50":
        from models.resnet import resnet50
        model = resnet50()
    elif name == "resnet101":
        from models.resnet import resnet101
        model = resnet101()
    elif name == "resnet152":
        from models.resnet import resnet152
        model = resnet152()
    elif name == "preactresnet18":
        from models.preactresnet import preactresnet18
        model = preactresnet18()
    elif name == "preactresnet34":
        from models.preactresnet import preactresnet34
        model = preactresnet34()
    elif name == "preactresnet50":
        from models.preactresnet import preactresnet50
        model = preactresnet50()
    elif name == "preactresnet101":
        from models.preactresnet import preactresnet101
        model = preactresnet101()
    elif name == "preactresnet152":
        from models.preactresnet import preactresnet152
        model = preactresnet152()
    elif name == "resnext50":
        from models.resnext import resnext50
        model = resnext50()
    elif name == "resnext101":
        from models.resnext import resnext101
        model = resnext101()
    elif name == "resnext152":
        from models.resnext import resnext152
        model = resnext152()
    elif name == "shufflenet":
        from models.shufflenet import shufflenet
        model = shufflenet()
    elif name == "shufflenetv2":
        from models.shufflenetv2 import shufflenetv2
        model = shufflenetv2()
    elif name == "squeezenet":
        from models.squeezenet import squeezenet
        model = squeezenet()
    elif name == "mobilenet":
        from models.mobilenet import mobilenet
        model = mobilenet()
    elif name == "mobilenetv2":
        from models.mobilenetv2 import mobilenetv2
        model = mobilenetv2()
    elif name == "nasnet":
        from models.nasnet import nasnet
        model = nasnet()
    elif name == "attention56":
        from models.attention import attention56
        model = attention56()
    elif name == "attention92":
        from models.attention import attention92
        model = attention92()
    elif name == "seresnet18":
        from models.senet import seresnet18
        model = seresnet18()
    elif name == "seresnet34":
        from models.senet import seresnet34
        model = seresnet34()
    elif name == "seresnet50":
        from models.senet import seresnet50
        model = seresnet50()
    elif name == "seresnet101":
        from models.senet import seresnet101
        model = seresnet101()
    elif name == "seresnet152":
        from models.senet import seresnet152
        model = seresnet152()
    elif name == "wideresnet":
        from models.wideresidual import wideresnet
        model = wideresnet()
    elif name == "stochasticdepth18":
        from models.stochasticdepth import stochastic_depth_resnet18
        model = stochastic_depth_resnet18()
    elif name == "stochasticdepth34":
        from models.stochasticdepth import stochastic_depth_resnet34
        model = stochastic_depth_resnet34()
    elif name == "stochasticdepth50":
        from models.stochasticdepth import stochastic_depth_resnet50
        model = stochastic_depth_resnet50()
    elif name == "stochasticdepth101":
        from models.stochasticdepth import stochastic_depth_resnet101
        model = stochastic_depth_resnet101()
    else:
        # fixed: `NotImplemented` is a constant, not an exception class
        raise NotImplementedError(f"Model {name} is not supported.")

    return model
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/cifar100/models/__init__.py
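Example usage of the factory above; this assumes the models directory is importable as the `models` package, matching the relative imports inside get_model:

# from models import get_model
# model = get_model("shufflenetv2")  # equivalent to models.shufflenetv2.shufflenetv2()

The lazy per-branch imports mean only the selected model's module is loaded, which keeps startup cheap when the package contains dozens of architectures.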
import torch
import torch.nn as nn


class BasicConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)

        return x


# same naive inception module
class InceptionA(nn.Module):

    def __init__(self, input_channels, pool_features):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 64, kernel_size=1)

        self.branch5x5 = nn.Sequential(
            BasicConv2d(input_channels, 48, kernel_size=1),
            BasicConv2d(48, 64, kernel_size=5, padding=2)
        )

        self.branch3x3 = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
            BasicConv2d(96, 96, kernel_size=3, padding=1)
        )

        self.branchpool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, pool_features, kernel_size=3, padding=1)
        )

    def forward(self, x):
        # x -> 1x1 (same)
        branch1x1 = self.branch1x1(x)

        # x -> 1x1 -> 5x5 (same)
        branch5x5 = self.branch5x5(x)

        # x -> 1x1 -> 3x3 -> 3x3 (same)
        branch3x3 = self.branch3x3(x)

        # x -> pool -> conv (same)
        branchpool = self.branchpool(x)

        outputs = [branch1x1, branch5x5, branch3x3, branchpool]

        return torch.cat(outputs, 1)


# downsample
# Factorization into smaller convolutions
class InceptionB(nn.Module):

    def __init__(self, input_channels):
        super().__init__()

        self.branch3x3 = BasicConv2d(input_channels, 384, kernel_size=3, stride=2)

        self.branch3x3stack = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
            BasicConv2d(96, 96, kernel_size=3, stride=2)
        )

        self.branchpool = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, x):
        # x -> 3x3 (downsample)
        branch3x3 = self.branch3x3(x)

        # x -> 3x3 -> 3x3 (downsample)
        branch3x3stack = self.branch3x3stack(x)

        # x -> maxpool (downsample)
        branchpool = self.branchpool(x)

        #"""We can use two parallel stride 2 blocks: P and C. P is a pooling
        #layer (either average or maximum pooling) of the activation, both of
        #them are stride 2, the filter banks of which are concatenated as in
        #figure 10."""
        outputs = [branch3x3, branch3x3stack, branchpool]

        return torch.cat(outputs, 1)


# Factorizing Convolutions with Large Filter Size
class InceptionC(nn.Module):

    def __init__(self, input_channels, channels_7x7):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 192, kernel_size=1)

        c7 = channels_7x7

        # In theory, we could go even further and argue that one can replace any n × n
        # convolution by a 1 × n convolution followed by a n × 1 convolution and the
        # computational cost saving increases dramatically as n grows (see figure 6).
        self.branch7x7 = nn.Sequential(
            BasicConv2d(input_channels, c7, kernel_size=1),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        )

        self.branch7x7stack = nn.Sequential(
            BasicConv2d(input_channels, c7, kernel_size=1),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        )

        self.branch_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 192, kernel_size=1),
        )

    def forward(self, x):
        # x -> 1x1 (same)
        branch1x1 = self.branch1x1(x)

        # x -> one pass of 7x1 and 1x7 (same)
        branch7x7 = self.branch7x7(x)

        # x -> two passes of 7x1 and 1x7 (same)
        branch7x7stack = self.branch7x7stack(x)

        # x -> avgpool (same)
        branchpool = self.branch_pool(x)

        outputs = [branch1x1, branch7x7, branch7x7stack, branchpool]

        return torch.cat(outputs, 1)


class InceptionD(nn.Module):

    def __init__(self, input_channels):
        super().__init__()

        self.branch3x3 = nn.Sequential(
            BasicConv2d(input_channels, 192, kernel_size=1),
            BasicConv2d(192, 320, kernel_size=3, stride=2)
        )

        self.branch7x7 = nn.Sequential(
            BasicConv2d(input_channels, 192, kernel_size=1),
            BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(192, 192, kernel_size=3, stride=2)
        )

        self.branchpool = nn.AvgPool2d(kernel_size=3, stride=2)

    def forward(self, x):
        # x -> 1x1 -> 3x3 (downsample)
        branch3x3 = self.branch3x3(x)

        # x -> 1x1 -> 1x7 -> 7x1 -> 3x3 (downsample)
        branch7x7 = self.branch7x7(x)

        # x -> avgpool (downsample)
        branchpool = self.branchpool(x)

        outputs = [branch3x3, branch7x7, branchpool]

        return torch.cat(outputs, 1)


# same
class InceptionE(nn.Module):

    def __init__(self, input_channels):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 320, kernel_size=1)

        self.branch3x3_1 = BasicConv2d(input_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3stack_1 = BasicConv2d(input_channels, 448, kernel_size=1)
        self.branch3x3stack_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3stack_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3stack_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 192, kernel_size=1)
        )

    def forward(self, x):
        # x -> 1x1 (same)
        branch1x1 = self.branch1x1(x)

        # x -> 1x1 -> 3x1
        # x -> 1x1 -> 1x3
        # concatenate(3x1, 1x3)
        #"""7. Inception modules with expanded the filter bank outputs.
        #This architecture is used on the coarsest (8 × 8) grids to promote
        #high dimensional representations, as suggested by principle
        #2 of Section 2."""
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3)
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        # x -> 1x1 -> 3x3 -> 1x3
        # x -> 1x1 -> 3x3 -> 3x1
        # concatenate(1x3, 3x1)
        branch3x3stack = self.branch3x3stack_1(x)
        branch3x3stack = self.branch3x3stack_2(branch3x3stack)
        branch3x3stack = [
            self.branch3x3stack_3a(branch3x3stack),
            self.branch3x3stack_3b(branch3x3stack)
        ]
        branch3x3stack = torch.cat(branch3x3stack, 1)

        branchpool = self.branch_pool(x)

        outputs = [branch1x1, branch3x3, branch3x3stack, branchpool]

        return torch.cat(outputs, 1)


class InceptionV3(nn.Module):

    def __init__(self, num_classes=100):
        super().__init__()
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, padding=1)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3, padding=1)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)

        # naive inception module
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)

        # downsample
        self.Mixed_6a = InceptionB(288)

        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)

        # downsample
        self.Mixed_7a = InceptionD(768)

        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)

        # 6x6 feature size
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout2d()
        self.linear = nn.Linear(2048, num_classes)

    def forward(self, x):
        # 32 -> 30
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)

        # 30 -> 30
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)

        # 30 -> 14
        # Efficient Grid Size Reduction to avoid representation bottleneck
        x = self.Mixed_6a(x)

        # 14 -> 14
        #"""In practice, we have found that employing this factorization does not
        #work well on early layers, but it gives very good results on medium
        #grid-sizes (On m × m feature maps, where m ranges between 12 and 20).
        #On that level, very good results can be achieved by using 1 × 7 convolutions
        #followed by 7 × 1 convolutions."""
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)

        # 14 -> 6
        # Efficient Grid Size Reduction
        x = self.Mixed_7a(x)

        # 6 -> 6
        #"""We are using this solution only on the coarsest grid,
        #since that is the place where producing high dimensional
        #sparse representation is the most critical as the ratio of
        #local processing (by 1 × 1 convolutions) is increased compared
        #to the spatial aggregation."""
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)

        # 6 -> 1
        x = self.avgpool(x)
        x = self.dropout(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x


def inceptionv3():
    return InceptionV3()
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/cifar100/models/inceptionv3.py
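A quick shape check for the CIFAR-adapted InceptionV3 above; an illustrative sketch assuming the file's definitions are in scope:

# On 32x32 input the stem shrinks 32 -> 30, the two grid reductions give
# 30 -> 14 -> 6, and global average pooling yields 2048 features.
net = inceptionv3()
net.eval()  # deterministic forward pass (Dropout2d disabled)
out = net(torch.randn(1, 3, 32, 32))
assert out.shape == (1, 100)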
import torch
import torch.nn as nn


class WideBasic(nn.Module):

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.residual = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=3,
                stride=stride,
                padding=1
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1
            )
        )

        self.shortcut = nn.Sequential()
        if in_channels != out_channels or stride != 1:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride=stride)
            )

    def forward(self, x):
        residual = self.residual(x)
        shortcut = self.shortcut(x)
        return residual + shortcut


class WideResNet(nn.Module):

    def __init__(self, num_classes, block, depth=50, widen_factor=1):
        super().__init__()

        self.depth = depth
        k = widen_factor
        l = int((depth - 4) / 6)
        self.in_channels = 16

        self.init_conv = nn.Conv2d(3, self.in_channels, 3, 1, padding=1)
        self.conv2 = self._make_layer(block, 16 * k, l, 1)
        self.conv3 = self._make_layer(block, 32 * k, l, 2)
        self.conv4 = self._make_layer(block, 64 * k, l, 2)
        self.bn = nn.BatchNorm2d(64 * k)
        self.relu = nn.ReLU(inplace=True)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(64 * k, num_classes)

    def forward(self, x):
        x = self.init_conv(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Build one stage of the network.

        By "layer" we mean a stage of the ResNet (not a single network layer
        such as a conv layer); one stage may contain more than one residual block.

        Args:
            block: block type, basic block or bottleneck block
            out_channels: output channel number of this stage
            num_blocks: how many blocks per stage
            stride: the stride of the first block of this stage

        Return:
            a stage of residual blocks as an nn.Sequential
        """
        # We have num_blocks blocks per stage. The stride of the first block
        # could be 1 or 2; the other blocks always use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels

        return nn.Sequential(*layers)


# Table 9: Best WRN performance over various datasets, single run results.
def wideresnet(depth=40, widen_factor=10):
    net = WideResNet(100, WideBasic, depth=depth, widen_factor=widen_factor)
    return net
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/cifar100/models/wideresidual.py
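An illustrative instantiation of the default configuration above (assumes the file's definitions are in scope):

# WRN-40-10 for CIFAR-100: depth=40 gives (40 - 4) / 6 = 6 blocks per stage,
# and widen_factor=10 widens the per-stage channels to 160/320/640.
net = wideresnet(depth=40, widen_factor=10)
out = net(torch.randn(2, 3, 32, 32))
assert out.shape == (2, 100)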
import argparse
import logging
import random
from pathlib import Path

import datasets
import torch
import transformers
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
    AdamW,
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    default_data_collator,
    set_seed,
)

# ZEUS
from zeus.run import ZeusDataLoader

logger = logging.getLogger(__name__)


def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Finetune a transformers model on a text classification task"
    )

    # CAPRICCIO
    parser.add_argument(
        "--data_dir",
        type=str,
        help="Directory where Capriccio is stored.",
        required=True,
    )
    parser.add_argument(
        "--slice_number",
        type=int,
        help=(
            "Which dataset slice to use."
            " Together with --data_dir, the paths to the train and val files are determined."
        ),
        required=True,
    )

    # ZEUS
    runtime_mode = parser.add_mutually_exclusive_group()
    runtime_mode.add_argument(
        "--zeus", action="store_true", help="Whether to run Zeus."
    )

    parser.add_argument(
        "--target_metric",
        default=None,
        type=float,
        help=(
            "Stop training when the target metric is reached. This is ignored when running in Zeus mode because"
            " ZeusDataLoader will receive the target metric via environment variable and stop training by itself."
        ),
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        help="Batch size for the training and eval dataloader.",
        required=True,
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--weight_decay", type=float, default=0.0, help="Weight decay to use."
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=3,
        help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--seed", type=int, default=None, help="A seed for reproducible training."
    )

    args = parser.parse_args()

    # CAPRICCIO
    if not (
        train_file := Path(args.data_dir) / f"{args.slice_number}_train.json"
    ).exists():
        raise ValueError(f"'{train_file}' does not exist")
    args.train_file = str(train_file)
    if not (val_file := Path(args.data_dir) / f"{args.slice_number}_val.json").exists():
        raise ValueError(f"'{val_file}' does not exist")
    args.val_file = str(val_file)

    return args


def main() -> None:
    """Run the main training routine."""
    args = parse_args()

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Setup logging: we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Load the dataset.
    # CAPRICCIO
    data_path = dict(train=args.train_file, validation=args.val_file)
    logger.info("Using dataset slice: %s", data_path)
    raw_datasets = load_dataset("json", data_files=data_path)
    label_list = raw_datasets["train"].unique("label")
    label_list.sort()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer.
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        args.model_name_or_path, num_labels=num_labels, finetuning_task=None
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_name_or_path, use_fast=not args.use_slow_tokenizer
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
    )

    # CAPRICCIO
    sentence1_key = "text"
    sentence2_key = None

    label_to_id = {v: i for i, v in enumerate(label_list)}
    model.config.label2id = label_to_id
    model.config.id2label = {id: label for label, id in config.label2id.items()}
    padding = "max_length" if args.pad_to_max_length else False

    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],)
            if sentence2_key is None
            else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(
            *texts, padding=padding, max_length=args.max_length, truncation=True
        )

        if "label" in examples:
            if label_to_id is not None:
                # Map labels to IDs (not necessary for GLUE tasks)
                result["labels"] = [label_to_id[l] for l in examples["label"]]
            else:
                # In all cases, rename the column to labels because the model will expect that.
                result["labels"] = examples["label"]
        return result

    processed_datasets = raw_datasets.map(
        preprocess_function,
        batched=True,
        remove_columns=raw_datasets["train"].column_names,
        desc="Running tokenizer on dataset",
    )

    # CAPRICCIO
    train_dataset = processed_datasets["train"]
    eval_dataset = processed_datasets["validation"]

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info("Sample %s of the training set: %s.", index, train_dataset[index])

    # DataLoaders creation:
    if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
        # to tensors.
        data_collator = default_data_collator
    else:
        # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
        # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiples
        # of 8, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
        data_collator = DataCollatorWithPadding(tokenizer)

    # ZEUS
    if args.zeus:
        train_dataloader = ZeusDataLoader(
            train_dataset,
            batch_size=args.batch_size,
            max_epochs=args.num_train_epochs,
            shuffle=True,
            collate_fn=data_collator,
        )
        eval_dataloader = ZeusDataLoader(
            eval_dataset, batch_size=args.batch_size, collate_fn=data_collator
        )
    else:
        train_dataloader = DataLoader(
            train_dataset,
            shuffle=True,
            collate_fn=data_collator,
            batch_size=args.batch_size,
        )
        eval_dataloader = DataLoader(
            eval_dataset, collate_fn=data_collator, batch_size=args.batch_size
        )

    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)

    # Send model to CUDA.
    model = model.cuda()

    # Note: the training dataloader needs to be prepared before we grab its length below
    # (because its length will be shorter in multiprocess).

    # Compute the total number of training steps.
    args.max_train_steps = args.num_train_epochs * len(train_dataloader)

    # Get the metric function
    metric = load_metric("accuracy")

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %s", len(train_dataset))
    logger.info("  Num Epochs = %s", args.num_train_epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %s",
        args.batch_size,
    )
    logger.info("  Total optimization steps = %s", args.max_train_steps)
    if args.target_metric is not None:
        logger.info("  Target metric = %s", args.target_metric)

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps))
    completed_steps = 0

    # ZEUS
    if args.zeus:
        assert isinstance(train_dataloader, ZeusDataLoader)
        epoch_iter = train_dataloader.epochs()
    else:
        epoch_iter = range(args.num_train_epochs)

    for epoch in epoch_iter:
        model.train()
        for batch in train_dataloader:
            for key, val in batch.items():
                if torch.is_tensor(val):
                    batch[key] = val.cuda()
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            progress_bar.update(1)
            completed_steps += 1

        model.eval()
        for batch in eval_dataloader:
            for key, val in batch.items():
                if torch.is_tensor(val):
                    batch[key] = val.cuda()
            outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            metric.add_batch(
                predictions=predictions,
                references=batch["labels"],
            )

        eval_metric = metric.compute()
        logger.info("epoch %s: %s", epoch, eval_metric)

        # ZEUS
        if args.zeus:
            assert isinstance(train_dataloader, ZeusDataLoader)
            train_dataloader.report_metric(
                eval_metric["accuracy"], higher_is_better=True
            )
        # In Zeus mode the train dataloader stops training by itself;
        # otherwise, check the target metric ourselves.
        elif args.target_metric is not None:
            if eval_metric["accuracy"] >= args.target_metric:
                logger.info(
                    "Reached target metric %s in %s epochs.",
                    args.target_metric,
                    epoch + 1,
                )
                break


if __name__ == "__main__":
    main()
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/capriccio/train.py
import argparse
import sys
from pathlib import Path

from zeus.job import Job
from zeus.policy import PruningGTSBatchSizeOptimizer
from zeus.run import ZeusMaster
from zeus.util import FileAndConsole


def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser()

    # This random seed is used for
    # 1. Multi-Armed Bandit inside PruningGTSBatchSizeOptimizer, and
    # 2. Providing random seeds for training.
    # Especially for 2, the random seed given to the nth recurrence job is args.seed + n.
    parser.add_argument("--seed", type=int, default=123, help="Random seed")

    # Default batch size and learning rate.
    # The first recurrence uses these parameters, and it must reach the target metric.
    # In subsequent recurrences, when the batch size changes, the new learning rate is determined
    # using an adequate learning rate scaling rule. Since this job uses AdamW (see constructor of
    # `Job`), Square Root Scaling will be used.
    parser.add_argument("--b_0", type=int, default=128, help="Default batch size")
    parser.add_argument(
        "--lr_0", type=float, default=4.00e-7, help="Default learning rate"
    )

    # The range of batch sizes to consider. The example script generates a list of power-of-two
    # batch sizes, but batch sizes need not be power-of-two for Zeus.
    parser.add_argument(
        "--b_min", type=int, default=8, help="Smallest batch size to consider"
    )
    parser.add_argument(
        "--b_max", type=int, default=128, help="Largest batch size to consider"
    )

    # The total number of recurrences. Capriccio has 38 time-overlapping slices.
    parser.add_argument(
        "--num_recurrence", type=int, default=38, help="Total number of recurrences"
    )

    # The \eta knob trades off time and energy consumption. See Equation 2 in the paper.
    # The \beta knob defines the early stopping threshold. See Section 4.4 in the paper.
    parser.add_argument(
        "--eta_knob", type=float, default=0.5, help="TTA-ETA tradeoff knob"
    )
    parser.add_argument(
        "--beta_knob", type=float, default=2.0, help="Early stopping threshold"
    )

    # Jobs are terminated when one of the three happens:
    # 1. The target validation metric is reached.
    # 2. The number of epochs exceeds the maximum number of epochs set.
    # 3. The cost of the next epoch is expected to exceed the early stopping threshold.
    parser.add_argument(
        "--target_metric", type=float, default=0.84, help="Target validation metric"
    )
    parser.add_argument(
        "--max_epochs", type=int, default=10, help="Max number of epochs to train"
    )

    # Zeus employs windowing to adapt to data drift. This argument defines the size of the window.
    parser.add_argument(
        "--window_size", type=int, default=10, help="Size of the MAB observation window"
    )

    return parser.parse_args()


def main(args: argparse.Namespace) -> None:
    """Run Zeus on Capriccio."""
    # Zeus's batch size optimizer.
    # First prunes unpromising batch sizes, and then runs Gaussian Thompson Sampling MAB.
    bso = PruningGTSBatchSizeOptimizer(
        window_size=args.window_size, seed=args.seed, verbose=True
    )

    # The top-level class for running Zeus.
    # - The batch size optimizer is designed as a pluggable policy.
    # - Paths (log_base and monitor_path) assume our Docker image's directory structure.
    master = ZeusMaster(
        batch_size_optimizer=bso,
        log_base="/workspace/zeus_logs",
        seed=args.seed,
        monitor_path="/workspace/zeus/zeus_monitor/zeus_monitor",
        observer_mode=False,
    )

    # Definition of the Capriccio job.
    # The `Job` class encloses all information needed to run training. The `command` parameter is
    # a command template. Curly-braced parameters are recognized by Zeus and automatically filled.
    job = Job(
        dataset="capriccio",
        network="bert_base_uncased",
        optimizer="adamw",
        target_metric=args.target_metric,
        max_epochs=args.max_epochs,
        default_bs=args.b_0,
        default_lr=args.lr_0,
        workdir="/workspace/zeus/examples/capriccio",
        # fmt: off
        command=[
            "python",
            "train.py",
            "--zeus",
            "--model_name_or_path", "bert-base-uncased",
            "--data_dir", "../../capriccio/data/",
            "--slice_number", "{slice_number}",  # This will be filled with the current recurrence number.
            "--max_length", "128",
            "--batch_size", "{batch_size}",
            "--learning_rate", "{learning_rate}",
            "--num_train_epochs", "{epochs}",
            "--seed", "{seed}",
        ],
        # fmt: on
    )

    # Generate a list of batch sizes with only power-of-two values.
    batch_sizes = [args.b_min]
    while (bs := batch_sizes[-1] * 2) <= args.b_max:
        batch_sizes.append(bs)

    # Create a designated log directory inside `args.log_base` just for this run of Zeus.
    # Six types of outputs are generated.
    # 1. Power monitor output (`bs{batch_size}+e{epoch_num}+gpu{device_id}.power.log`):
    #    Raw output of the Zeus power monitor.
    # 2. Profiling results (`bs{batch_size}.power.json`):
    #    Train-time average power consumption and throughput for each power limit,
    #    the optimal power limit determined from the result of profiling, and
    #    eval-time average power consumption and throughput for the optimal power limit.
    # 3. Training script output (`rec{recurrence_num}+try{trial_num}.train.log`):
    #    The raw output of the training script. `trial_num` exists because the job
    #    may be early stopped and re-run with another batch size.
    # 4. Training result (`rec{recurrence_num}+try{trial_num}+bs{batch_size}.train.json`):
    #    The total energy, time, and cost consumed, and the number of epochs trained
    #    until the job terminated. Also, whether the job reached the target metric at the
    #    time of termination. Early-stopped jobs will not have reached their target metric.
    # 5. ZeusMaster output (`master.log`): Output from ZeusMaster, including MAB outputs.
    # 6. Job history (`history.py`):
    #    A python file holding a list of `HistoryEntry` objects. Intended use is:
    #    `history = eval(open("history.py").read())` after importing `HistoryEntry`.
    master_logdir = master.build_logdir(
        job=job,
        num_recurrence=args.num_recurrence,
        eta_knob=args.eta_knob,
        beta_knob=args.beta_knob,
        exist_ok=False,  # Should err if this directory exists.
    )

    # Overwrite the stdout file descriptor with an instance of `FileAndConsole`, so that
    # all calls to `print` will write to both the console and the master log file.
    sys.stdout = FileAndConsole(Path(master_logdir) / "master.log")

    # Run Zeus!
    master.run(
        job=job,
        num_recurrence=args.num_recurrence,
        batch_sizes=batch_sizes,
        beta_knob=args.beta_knob,
        eta_knob=args.eta_knob,
    )


if __name__ == "__main__":
    main(parse_args())
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/examples/ZeusDataLoader/capriccio/run_zeus.py
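With the defaults above (b_min=8, b_max=128), the power-of-two loop produces the candidate batch sizes [8, 16, 32, 64, 128]. For reference, the Square Root Scaling rule the comments mention (used here because the job's optimizer is AdamW) can be sketched as follows; this is an illustrative sketch of the assumed rule, not the actual zeus.util implementation:

# Assumed form of Square Root Scaling: when the batch size changes from
# b0 to b, scale the default learning rate lr0 by sqrt(b / b0).
def sqrt_scaled_lr(lr0: float, b0: int, b: int) -> float:
    return lr0 * (b / b0) ** 0.5

# e.g. moving from the defaults (b0=128, lr0=4e-7) down to batch size 32
# halves the learning rate, since sqrt(32 / 128) = 0.5.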
from __future__ import annotations

from dataclasses import dataclass
from datetime import timedelta
from pathlib import Path
from typing import cast

import pandas as pd
from sklearn.metrics import auc


@dataclass
class HistoryEntry:
    """Represents the config and result of a job run that may have failed.

    Attributes:
        bs: Batch size
        pl: Power limit
        energy: Energy consumption in Joules
        reached: Whether the target metric was reached at the end
        time: Time consumption in seconds
    """

    bs: int
    pl: int
    energy: float
    reached: bool
    time: float


# ruff: noqa: PLR2004
def energy(
    logfile: Path | str,
    start: float | None = None,
    end: float | None = None,
) -> float:
    """Compute the energy consumption from the Zeus monitor power log file.

    `start` and `end` are in units of seconds, relative to the beginning of
    the time window captured by the log file. Only the time window between
    `start` and `end` will be considered when computing energy.

    `start` and `end` can be negative, in which case the pointers wrap around
    and effectively the absolute value is subtracted from the end of the window.

    Args:
        logfile: Path to the power log file produced by the Zeus monitor.
        start: Start time of the window to consider.
        end: End time of the window to consider.
    """
    df = cast(pd.DataFrame, pd.read_csv(logfile, engine="python", skipfooter=1))
    df["Time"] = pd.to_datetime(df["Time"])
    start_timestamp = df.iloc[0]["Time"]
    end_timestamp = df.iloc[-1]["Time"]
    if start is not None:
        origin = start_timestamp if start >= 0.0 else end_timestamp
        df = df.loc[df["Time"] >= origin + timedelta(seconds=start)]
    if end is not None:
        origin = start_timestamp if end >= 0.0 else end_timestamp
        df = df.loc[df["Time"] <= origin + timedelta(seconds=end)]
    seconds = _get_seconds(df)
    watts = _get_watts(df)
    return auc(seconds, watts)


def avg_power(
    logfile: Path | str,
    start: float | None = None,
    end: float | None = None,
) -> float:
    """Compute the average power consumption from the Zeus monitor power log file.

    `start` and `end` are in units of seconds, relative to the beginning of
    the time window captured by the log file. Only the time window between
    `start` and `end` will be considered when computing average power.

    `start` and `end` can be negative, in which case the pointers wrap around
    and effectively the absolute value is subtracted from the end of the window.

    Args:
        logfile: Path to the power log file produced by the Zeus monitor.
        start: Start time of the window to consider.
        end: End time of the window to consider.

    Raises:
        ValueError: From `sklearn.metrics.auc`, when the duration of the
            profiling window is too small.
    """
    df = cast(pd.DataFrame, pd.read_csv(logfile, engine="python", skipfooter=1))
    df["Time"] = pd.to_datetime(df["Time"])
    if start is not None:
        df = df.loc[df["Time"] >= df.iloc[0]["Time"] + timedelta(seconds=start)]
    if end is not None:
        df = df.loc[df["Time"] <= df.iloc[0]["Time"] + timedelta(seconds=end)]
    seconds = _get_seconds(df)
    watts = _get_watts(df)
    area = auc(seconds, watts)
    return area / (seconds.max() - seconds.min())


def _get_seconds(df: pd.DataFrame) -> pd.Series:
    return df["Time"].map(lambda t: t.timestamp())


def _get_watts(df: pd.DataFrame) -> pd.Series:
    return df["Power"].div(1000.0)
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/analyze.py
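Hypothetical usage of the helpers above. The log filename follows the `bs{batch_size}+e{epoch_num}+gpu{device_id}.power.log` pattern documented in run_zeus.py; the file is assumed to have "Time" and "Power" columns, with power in milliwatts (implied by the division by 1000 in _get_watts):

# joules = energy("bs128+e0+gpu0.power.log", start=0.0, end=-5.0)  # skip the last 5 s
# watts = avg_power("bs128+e0+gpu0.power.log")

Energy is the area under the power-vs-time curve, which is why both functions integrate with sklearn's trapezoidal auc; average power is that area divided by the window duration.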
from __future__ import annotations

import pynvml

from zeus.callback import Callback
from zeus.monitor import ZeusMonitor
from zeus.util.metric import zeus_cost
from zeus.util.logging import get_logger


class EarlyStopController(Callback):
    """Controller for early stopping."""

    def __init__(
        self,
        monitor: ZeusMonitor | None = None,
        eta_knob: float = 0.5,
        cost_threshold: float | None = None,
        max_epochs: int | None = None,
        target_metric: float | None = None,
        higher_is_better: bool | None = None,
    ) -> None:
        r"""Initialize the controller.

        Check whether training should terminate through the `should_training_stop` attribute.

        - If you gave `max_epochs`, check after `on_epoch_end()`.
        - If you gave `cost_threshold`, check after `on_epoch_end()`.
        - If you gave `target_metric`, check after `on_evaluate()`.

        Args:
            monitor: The monitor instance to use for measuring time and energy.
                Required if `cost_threshold` is given.
            eta_knob: The $0 \le \eta \le 1$ knob for the Zeus time-energy cost.
                (Default: 0.5)
            cost_threshold: When running the next epoch will exceed this cost.
                Only training cost is considered, not validation or testing cost.
            max_epochs: Maximum number of epochs to run.
            target_metric: Stop training when the metric reaches this value.
            higher_is_better: If `True`, `target_metric` is assumed reached when
                the reported metric is larger than or equal to the `target_metric`.
                Required if `target_metric` is given.
        """
        # Sanity check the arguments.
        if max_epochs is not None and max_epochs <= 0:
            raise ValueError("max_epochs must be positive")
        if cost_threshold is not None and cost_threshold <= 0:
            raise ValueError("cost_threshold must be positive")
        if (cost_threshold is None) ^ (monitor is None):
            raise ValueError("cost_threshold and monitor must be given together")
        if (target_metric is None) ^ (higher_is_better is None):
            raise ValueError(
                "target_metric and higher_is_better must be given together"
            )

        # Save arguments.
        self.monitor = monitor
        self.eta_knob = eta_knob
        self.cost_threshold = cost_threshold
        self.max_epochs = max_epochs
        self.target_metric = target_metric
        self.higher_is_better = higher_is_better

        # Setup logging.
        self.logger = get_logger(type(self).__name__)

        # Cache NVML device handles if they're needed.
        self.gpu_handles = {}
        self.max_power = {}
        if self.cost_threshold is not None:
            assert self.monitor is not None
            pynvml.nvmlInit()
            for gpu_index in self.monitor.gpu_indices:
                device = pynvml.nvmlDeviceGetHandleByIndex(gpu_index)
                self.gpu_handles[gpu_index] = device
                self.max_power[gpu_index] = (
                    pynvml.nvmlDeviceGetPowerManagementLimitConstraints(device)[1]
                    // 1000
                )

        # States.
        self.epochs_trained = 0
        self.epoch_costs = []

        # Once switched to `True`, there is no switching back to `False`.
        self.should_training_stop = False

    def on_epoch_begin(self) -> None:
        """Start measuring the cost of the next epoch."""
        if self.cost_threshold is not None:
            assert self.monitor is not None
            self.monitor.begin_window("__EarlyStopController_epoch")

    def on_epoch_end(self) -> None:
        """Check if the training cost of the next epoch will exceed the threshold."""
        if self.max_epochs is not None:
            self.epochs_trained += 1
            if self.epochs_trained >= self.max_epochs:
                self.logger.info(
                    "[Stop training!] Epochs trained %d >= Max epochs %d",
                    self.epochs_trained,
                    self.max_epochs,
                )
                self.should_training_stop = True
                return

        if self.cost_threshold is not None:
            assert self.monitor is not None
            measurement = self.monitor.end_window("__EarlyStopController_epoch")
            cost = sum(
                zeus_cost(
                    energy=measurement.energy[gpu_index],
                    time=measurement.time,
                    eta_knob=self.eta_knob,
                    max_power=self.max_power[gpu_index],
                )
                for gpu_index in self.monitor.gpu_indices
            )
            self.epoch_costs.append(cost)
            if (nec := self._expected_next_epoch_cost()) >= self.cost_threshold:
                self.logger.info(
                    "[Stop training!] Expected next epoch cost %f >= Cost threshold %f",
                    nec,
                    self.cost_threshold,
                )
                self.should_training_stop = True
                return

    def on_evaluate(self, metric: float) -> None:
        """Check if the target metric was reached."""
        if self.target_metric is not None:
            assert self.higher_is_better is not None
            # ruff: noqa: SIM108
            if self.higher_is_better:
                reached = metric >= self.target_metric
            else:
                reached = metric <= self.target_metric
            if reached:
                self.logger.info(
                    "[Stop training!] Evaluation metric %f reached target metric %f",
                    metric,
                    self.target_metric,
                )
                self.should_training_stop = True

    def _expected_next_epoch_cost(self) -> float:
        """Predict the total cost if the next training epoch is to be run."""
        cost_until_now = sum(self.epoch_costs)
        average_epoch_cost = cost_until_now / len(self.epoch_costs)
        return cost_until_now + average_epoch_cost
# Source: zeus-ml :: /zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/controller.py
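A minimal usage sketch of the callback protocol above, driven only by `max_epochs` and `target_metric` (so no monitor is required); `run_train_epoch` and `evaluate` are hypothetical stand-ins for the user's training code:

controller = EarlyStopController(
    max_epochs=10, target_metric=0.84, higher_is_better=True
)
for epoch in range(100):
    controller.on_epoch_begin()
    run_train_epoch()                # hypothetical: train for one epoch
    controller.on_epoch_end()
    controller.on_evaluate(evaluate())  # hypothetical: returns a float metric
    if controller.should_training_stop:
        break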
from __future__ import annotations

from dataclasses import dataclass, field

import pandas as pd

from zeus.util import LinearScaler, SquareRootScaler


@dataclass(frozen=True, unsafe_hash=True)
class Job:
    """Job specification tuple.

    Attributes:
        dataset: Name of the dataset.
        network: Name of the DNN model.
        optimizer: Name of the optimizer, e.g. Adam.
        target_metric: Target validation metric.
        max_epochs: Maximum number of epochs to train before terminating.
        default_bs: Initial batch size (b0) provided by the user.
        default_lr: Learning rate corresponding to the default batch size.
        workdir: Working directory in which to launch the job command.
        command: Job command template. See [`gen_command`][zeus.job.Job.gen_command].
    """

    dataset: str
    network: str
    optimizer: str
    target_metric: float
    max_epochs: int
    default_bs: int | None = None
    default_lr: float | None = None
    workdir: str | None = None
    command: list[str] | None = field(default=None, hash=False, compare=False)

    def __str__(self) -> str:
        """Generate a more concise representation of the object."""
        return (
            f"Job({self.dataset},{self.network},{self.optimizer},{self.target_metric}"
            f"{f',bs{self.default_bs}' if self.default_bs is not None else ''}~{self.max_epochs})"
        )

    def to_logdir(self) -> str:
        """Generate a logdir name that explains this job."""
        return (
            f"{self.dataset}+{self.network}+bs{self.default_bs}"
            f"+{self.optimizer}+lr{self.default_lr}"
            f"+tm{self.target_metric}+me{self.max_epochs}"
        )

    def filter_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Pick out the rows corresponding to this job from the DataFrame."""
        return df.loc[
            (df.dataset == self.dataset)
            & (df.network == self.network)
            & (df.optimizer == self.optimizer)
            & (df.target_metric == self.target_metric)
        ]

    def gen_command(
        self,
        batch_size: int,
        learning_rate: float,
        seed: int,
        rec_i: int,
    ) -> list[str]:
        """Format the job command with given arguments.

        Args:
            batch_size: Batch size to use for this job launch.
            learning_rate: Learning rate to use for this job launch.
            seed: Random seed to use for this job launch.
            rec_i: Recurrence number of this job launch.
        """
        assert self.command, "You must provide a command format string for this job."
        command = []
        for piece in self.command:
            if piece in ["{bs}", "{batch_size}"]:
                command.append(str(batch_size))
            elif piece in ["{lr}", "{learning_rate}"]:
                command.append(str(learning_rate))
            elif piece == "{seed}":
                command.append(str(seed))
            elif piece in ["{epoch}", "{epochs}"]:
                command.append(str(self.max_epochs))
            elif piece == "{slice_number}":
                command.append(str(rec_i))
            elif piece == "{target_metric}":
                command.append(str(self.target_metric))
            else:
                command.append(piece)
        return command

    def scale_lr(self, batch_size: int) -> float:
        """Scale the learning rate for the given batch size.

        Assumes that `self.default_bs` and `self.default_lr` were given.
        Then, `self.default_lr` is scaled for the given `batch_size` using
        square root scaling for adaptive optimizers (e.g. Adam, Adadelta,
        AdamW) and linear scaling for others (e.g. SGD).
        """
        assert self.default_bs, "You must provide default_bs to scale LR."
        assert self.default_lr, "You must provide default_lr to scale LR."

        optimizer = self.optimizer.lower()
        if optimizer in ["adam", "adadelta", "adamw"]:
            scaler = SquareRootScaler(bs=self.default_bs, lr=self.default_lr)
            return scaler.compute_lr(batch_size)
        if optimizer in ["sgd"]:
            scaler = LinearScaler(bs=self.default_bs, lr=self.default_lr)
            return scaler.compute_lr(batch_size)
        raise NotImplementedError(f"LR scaling for {self.optimizer} is not supported.")
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/job.py
job.py
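# --- Editor's example (not part of the package source) ---
# A minimal sketch of using `Job` directly. The dataset/network names and the
# command template below are made-up placeholders; only the `Job` API itself
# comes from zeus/job.py above.
from zeus.job import Job

job = Job(
    dataset="cifar100",
    network="shufflenetv2",
    optimizer="adamw",
    target_metric=0.60,
    max_epochs=100,
    default_bs=128,
    default_lr=0.001,
    command=["python", "train.py", "--bs", "{batch_size}", "--lr", "{lr}", "--seed", "{seed}"],
)

# Placeholders like "{batch_size}" and "{lr}" are substituted; everything else
# passes through untouched. Since AdamW is adaptive, scale_lr applies
# square-root scaling from (default_bs, default_lr) to the new batch size.
command = job.gen_command(
    batch_size=256, learning_rate=job.scale_lr(256), seed=1, rec_i=0
)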
from __future__ import annotations

import operator
from copy import deepcopy
from dataclasses import dataclass

import numpy as np
import pandas as pd

from zeus.analyze import HistoryEntry
from zeus.job import Job
from zeus.policy import BatchSizeOptimizer, PowerLimitOptimizer
from zeus.util import zeus_cost

# ruff: noqa: PLR0912, PLR0915


class Simulator:
    """Simulates job execution optimized by Zeus."""

    def __init__(
        self,
        summary_train: str | pd.DataFrame,
        summary_power: str | pd.DataFrame,
        batch_size_optimizer: BatchSizeOptimizer,
        power_limit_optimizer: PowerLimitOptimizer,
        seed: int = 123456,
        verbose: bool = True,
    ) -> None:
        """Initialize the simulator.

        Args:
            summary_train: Path to or `pd.DataFrame` of the train trace.
            summary_power: Path to or `pd.DataFrame` of the power trace.
            batch_size_optimizer: The user is expected to construct the BSO
                with the desired policy and pass it into the simulator.
            power_limit_optimizer: The user is expected to construct the PLO
                with the desired policy and pass it into the simulator.
            seed: The random seed. Every invocation of any simulation method
                in this class is deterministic given the random seed, because
                the internal RNG is deepcopied before running the simulation.
            verbose: Whether to log out the internal states of the simulator.
        """
        # Generate relevant data.
        train_df = (
            pd.read_csv(summary_train)
            if isinstance(summary_train, str)
            else summary_train
        )
        power_df = (
            pd.read_csv(summary_power)
            if isinstance(summary_power, str)
            else summary_power
        )
        df = train_df.merge(power_df, how="inner")  # type: ignore
        df["TTA"] = df.target_epoch * df.time_per_epoch
        df["ETA"] = df.TTA * df.average_power
        # 'energy_per_epoch' is used to compare different power limits with the
        # same batch size when trying to figure out which power limit is the best one.
        df["energy_per_epoch"] = df.time_per_epoch * df.average_power
        self.df = df

        # Knob optimizers.
        self.bso = batch_size_optimizer
        self.plo = power_limit_optimizer

        # Save arguments.
        self.seed = seed
        self.verbose = verbose

    def simulate_one_job(
        self,
        job: Job,
        num_recurrence: int,
        beta_knob: float,
        eta_knob: float,
    ) -> list[HistoryEntry]:
        r"""Simulate a sequentially recurring job. Explore with early stopping.

        Args:
            job: Job spec to simulate.
            num_recurrence: How many times the job recurs.
            beta_knob: `beta_knob * min_eta` is the early stopping cost threshold.
                Set to `np.inf` to disable early stopping.
            eta_knob: $\eta$ used in the hybrid cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$

        Returns:
            A list of [`HistoryEntry`][zeus.analyze.HistoryEntry] objects for each job run.
        """
        # Copy all internal state so that simulation does not modify any
        # internal state and is deterministic w.r.t. the random seed.
        bso = deepcopy(self.bso)
        plo = deepcopy(self.plo)
        rng = np.random.default_rng(self.seed)

        # Figure out MAXPOWER.
        max_power = self.df.power_limit.max().item()
        if self.verbose:
            print(f"[Simulator] Max power = {max_power}W")

        # Track the minimum cost observed for the early stopping energy threshold.
        min_cost = np.inf

        # Simulate each job one at a time.
        history: list[HistoryEntry] = []

        if self.verbose:
            print(f"[Simulator] {job} recurring {num_recurrence} times.")

        # A new job. Profile the feasible batch size range.
        batch_sizes = self._profile_batch_size_range(job)

        # Register the job in the BSO.
        bso.register_job(job, batch_sizes)

        # Job recurs.
        for i in range(num_recurrence):
            if self.verbose:
                print(f"\nRecurrence: {i+1}")

            # Run the job until convergence. Upper bound the number of retries to 20.
            # Accumulate the cost of retries before convergence.
            cost_acc = 0.0
            for tries in range(1, 21):
                # Whether this run of the job needed to profile power.
                profiled_power = False

                # Fetch knobs to use.
                bs = bso.predict(job)
                pl = plo.predict(job, bs)

                # When the batch size is first explored, we need to profile power limit.
                if pl is None:
                    profiled_power = True
                    result = self._profile_power_limit(job, bs, eta_knob)
                    for pl, epe in result.items():
                        plo.observe(job, bs, pl, epe)
                    pl = plo.predict(job, bs)
                    assert pl

                # Run the job, potentially early stopping it.
                eta, tta, reached = self._run_job(
                    job=job,
                    batch_size=bs,
                    power_limit=pl,
                    rng=rng,
                    cost_ub=beta_knob * min_cost,
                    eta_knob=eta_knob,
                    profile_power=profiled_power,
                )

                # The job never ran because even one epoch exceeds the cost threshold.
                # Let the BSO observe that this batch size is bad, but since the job
                # did not run, do not add to the history and retry.
                if eta == 0 and tta == 0 and not reached:
                    bso.observe(job, bs, 100 * beta_knob * min_cost, False)
                    continue

                # Compute the hybrid cost metric.
                cost = zeus_cost(eta, tta, eta_knob, max_power)
                cost_acc += cost

                # Provide feedback to the BSO.
                bso.observe(job, bs, cost, reached)

                # Record history for analysis.
                history.append(HistoryEntry(bs, pl, eta, reached, tta))

                # Reached the target metric. Update min_cost and go on to the next recurrence.
                if reached:
                    if self.verbose:
                        print()
                        print(
                            f"[Simulator] Reached target metric in {tries} {'try' if tries == 1 else 'tries'}."
                        )
                    if min_cost > cost_acc:
                        if self.verbose:
                            print(
                                f"[Simulator] Minimum cost updated from {min_cost:.2f} to {cost_acc:.2f}."
                            )
                        min_cost = cost_acc
                    break
                # Didn't reach the target metric.
                # We assume that the default BS (set by the user) will always converge.
                # That is, reaching the target metric with the model should be a feasible task.
                if i == 0:
                    raise RuntimeError(
                        f"The default batch size {job.default_bs} did not converge."
                    )
            # Target metric was not reached in 20 tries. We consider this target metric to be unreachable.
            else:
                raise RuntimeError("Job did not reach the target metric in 20 tries.")

        if self.verbose:
            print()
            print(
                f"[Simulator] {job} (BS, PL, ETA, whether_reached, TTA) history: \n{history}"
            )

        return history

    def simulate_one_alibaba_group(
        self,
        job: Job,
        group_df: pd.DataFrame,
        beta_knob: float,
        eta_knob: float,
    ) -> list[HistoryEntry]:
        r"""Run simulation on one group in the Alibaba trace.

        Concurrent job submissions (jobs that start before the previous job
        finishes) are launched with the batch size known to be of minimum cost
        at that time. The BSO also observes the results of these jobs when they
        are done, and these jobs may well finish before a job that started
        before it. See `observe` in PruningGTSBatchSizeOptimizer for an example
        of handling such a scenario.

        Args:
            job: Job spec of this group.
            group_df: DataFrame of this group. Fields:

                - group: Group ID in trace. Identical across all rows.
                - dataset: Dataset name. Identical across all rows.
                - start_time: Job start time in the trace.
                - end_time: Job end time in the trace.
                - runtime_ratio: Runtime of this job over the mean runtime of all
                    the jobs of this dataset. Captures intra-dataset job runtime
                    differences.
            beta_knob: `beta_knob * min_eta` is the early stopping cost threshold.
                Set to `np.inf` to disable early stopping.
            eta_knob: $\eta$ used in the hybrid cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$

        Returns:
            A list of [`HistoryEntry`][zeus.analyze.HistoryEntry] objects for each job run.
        """
        # Copy all internal state so that simulation does not modify any
        # internal state and is deterministic w.r.t. the random seed.
        bso = deepcopy(self.bso)
        plo = deepcopy(self.plo)
        rng = np.random.default_rng(self.seed)

        # Sanity check.
        if job.default_bs is None:
            raise ValueError("You must give the job a default batch size.")

        # Figure out MAXPOWER.
        max_power = self.df.power_limit.max().item()
        if self.verbose:
            print(f"[Simulator] Max power = {max_power}W")

        # Track the minimum cost observed for the early stopping energy threshold.
        # Also track the corresponding minimum cost BS to handle concurrent jobs.
        min_cost = np.inf
        best_bs = job.default_bs

        # Simulate each job one at a time.
        history: list[HistoryEntry] = []

        if self.verbose:
            print(f"[Simulator] {job} recurring {len(group_df)} times.")

        # A new job. Profile the feasible batch size range.
        batch_sizes = self._profile_batch_size_range(job)

        # Register the job in the BSO.
        bso.register_job(job, batch_sizes)

        # List of jobs in flight.
        @dataclass
        class RunningJob:
            """Represents a job that is running.

            We know what's going to happen to this job when we launch it. Thus,
            pre-compute all results (using self.run_job) and have this instance
            hold the information. Then, jobs will be removed from the list of
            running jobs when the current time passes self.end_time and the
            result will be observed.
            """

            start_time: float
            end_time: float
            runtime_ratio: float
            batch_size: int
            power_limit: int
            reached: bool
            time: float
            energy: float
            cost: float

        running_jobs: list[RunningJob] = []

        # Jobs in group_df are already sorted by start_time.
        current_time = 0.0
        for rec_i, (_, job_row) in enumerate(group_df.iterrows()):
            if self.verbose:
                print(f"\nRecurrence: {rec_i+1}")

            # Update the current time.
            current_time = job_row.start_time
            if self.verbose:
                print(f"[Simulator] Current time is {current_time:.1f}")

            # Scan the in-flight job list to reap jobs that have finished.
            # We need a while loop here because we might have submitted a retry job
            # while reaping jobs that failed to reach the target metric, and that retry job
            # may finish before the current job.
            # pylint: disable=cell-var-from-loop
            while any(map(lambda j: j.end_time <= current_time, running_jobs)):
                if self.verbose:
                    print(f"[Simulator] Running jobs: {running_jobs}")

                # We copy running_jobs because we want to mutate it as we traverse it.
                running_jobs_copy = deepcopy(running_jobs)

                # Sort the jobs in the order they end.
                for rjob in sorted(
                    running_jobs_copy, key=operator.attrgetter("end_time")
                ):
                    # We're only interested in jobs that finished at this point in time.
                    if rjob.end_time > current_time:
                        continue

                    # Job is finished at this point in time.
                    if self.verbose:
                        print(
                            f"[Simulator] Job(BS={rjob.batch_size},time={rjob.time},"
                            f"energy={rjob.energy},reached={rjob.reached}) finished"
                        )

                    # Remove the job from the in-flight job list.
                    running_jobs.remove(rjob)

                    # Let the BSO observe the cost for this batch size.
                    bso.observe(job, rjob.batch_size, rjob.cost, rjob.reached)

                    # If the job never ran because even one epoch exceeds the cost threshold,
                    # do not add to the history and retry.
                    if rjob.energy != 0 or rjob.time != 0 or rjob.reached:
                        # Record history for analysis.
                        history.append(
                            HistoryEntry(
                                rjob.batch_size,
                                rjob.power_limit,
                                rjob.energy * rjob.runtime_ratio,  # Scale the energy of this job by the runtime ratio.
                                rjob.reached,
                                rjob.time * rjob.runtime_ratio,  # Scale the runtime of this job by the runtime ratio.
                            )
                        )

                    # Reached target metric (no need to retry).
                    if rjob.reached:
                        if min_cost > rjob.cost:
                            if self.verbose:
                                print(
                                    f"[Simulator] Minimum cost updated from {min_cost:.2f} to {rjob.cost:.2f}"
                                )
                            min_cost = rjob.cost
                            best_bs = rjob.batch_size
                    # Didn't reach target metric. Need to run a retry job.
                    else:
                        profiled_power = False

                        # If there are jobs in-flight, we just run additional concurrent
                        # submissions with the best known knobs.
                        if running_jobs:
                            if self.verbose:
                                print(
                                    f"[Simulator] There are in-flight jobs. Use BS {best_bs}."
                                )
                            bs = best_bs
                            pl = plo.predict(job, bs)
                            assert pl, f"Power not profiled for best known BS {bs}"
                        # There are no jobs in-flight. Just submit the job normally.
                        else:
                            # Determine the knobs.
                            bs = bso.predict(job)
                            pl = plo.predict(job, bs)
                            if self.verbose:
                                print(
                                    f"[Simulator] There are no in-flight jobs. Use BSO's prediction {bs}."
                                )

                            # When the batch size is first explored, we need to profile power limit.
                            if pl is None:
                                profiled_power = True
                                result = self._profile_power_limit(job, bs, eta_knob)
                                for pl, epe in result.items():
                                    plo.observe(job, bs, pl, epe)
                                pl = plo.predict(job, bs)
                                assert pl

                        # Pre-compute the result of the job.
                        eta, tta, reached = self._run_job(
                            job=job,
                            batch_size=bs,
                            power_limit=pl,
                            rng=rng,
                            cost_ub=beta_knob * min_cost,
                            eta_knob=eta_knob,
                            profile_power=profiled_power,
                        )

                        # Compute the hybrid cost metric.
                        cost = zeus_cost(eta, tta, eta_knob, max_power)

                        # Create the RunningJob instance.
                        running_job = RunningJob(
                            start_time=rjob.end_time,
                            end_time=rjob.end_time + (rjob.end_time - rjob.start_time),  # Assume same runtime.
                            runtime_ratio=rjob.runtime_ratio,
                            batch_size=bs,
                            power_limit=pl,
                            reached=reached,
                            time=tta,
                            energy=eta,
                            cost=cost,
                        )
                        running_jobs.append(running_job)
                        if self.verbose:
                            print(f"[Simulator] Started retry job {running_job}")

                        # We must break from the loop that scans the running_jobs list, because
                        # the job we just submitted might actually be the next job that finishes.
                        break

            # We're (finally) done reaping all finished jobs. Run the current job.
            profiled_power = False

            # If there are jobs in-flight, we just run additional concurrent
            # submissions with the best known knobs.
            if running_jobs:
                if self.verbose:
                    print(f"[Simulator] There are in-flight jobs. Use BS {best_bs}.")
                bs = best_bs
                pl = plo.predict(job, bs)
                assert pl is not None, f"Power not profiled for best known BS {bs}"
            # There are no jobs in-flight. Just submit the job.
            else:
                # Determine the knobs.
                bs = bso.predict(job)
                pl = plo.predict(job, bs)
                if self.verbose:
                    print(
                        f"[Simulator] There are no in-flight jobs. Use BSO's prediction {bs}."
                    )

                # When the batch size is first explored, we need to profile power limit.
                if pl is None:
                    profiled_power = True
                    result = self._profile_power_limit(job, bs, eta_knob)
                    for pl, epe in result.items():
                        plo.observe(job, bs, pl, epe)
                    pl = plo.predict(job, bs)
                    assert pl

            # Run the job, potentially early stopping it.
            eta, tta, reached = self._run_job(
                job=job,
                batch_size=bs,
                power_limit=pl,
                rng=rng,
                cost_ub=beta_knob * min_cost,
                eta_knob=eta_knob,
                profile_power=profiled_power,
            )

            # Compute the hybrid cost metric.
            cost = zeus_cost(eta, tta, eta_knob, max_power)

            # Create the RunningJob instance and enqueue.
            running_job = RunningJob(
                start_time=job_row.start_time,
                end_time=job_row.end_time,
                runtime_ratio=job_row.runtime_ratio,
                batch_size=bs,
                power_limit=pl,
                reached=reached,
                time=tta,
                energy=eta,
                cost=cost,
            )
            running_jobs.append(running_job)
            if self.verbose:
                print(f"[Simulator] Started job {running_job}")

        # Now, we're done iterating the rows of group_df.
        # Set the current time to infinity so that all running jobs finish.
        current_time = np.inf
        if self.verbose:
            print("\n[Simulator] Reap all jobs")

        # Scan the remaining in-flight job list to reap jobs that have finished.
        # Since current_time is infinity, this while loop will reap all the jobs ever launched.
        while any(map(lambda j: j.end_time <= current_time, running_jobs)):
            if self.verbose:
                print(f"[Simulator] Running jobs: {running_jobs}")

            # We copy running_jobs because we want to mutate it as we traverse it.
            running_jobs_copy = deepcopy(running_jobs)

            # Sort the jobs in the order they end.
            for rjob in sorted(running_jobs_copy, key=operator.attrgetter("end_time")):
                # We're only interested in jobs that finished at this point in time.
                if rjob.end_time > current_time:
                    continue

                # Job is finished at this point in time.
                if self.verbose:
                    print(
                        f"[Simulator] Job(BS={rjob.batch_size},time={rjob.time},"
                        f"energy={rjob.energy},reached={rjob.reached}) finished"
                    )

                # Remove the job from the in-flight job list.
                running_jobs.remove(rjob)

                # Let the BSO observe the cost for this batch size.
                bso.observe(job, rjob.batch_size, rjob.cost, rjob.reached)

                # If the job never ran because even one epoch exceeds the cost threshold,
                # do not add to the history and retry.
                if rjob.energy != 0 or rjob.time != 0 or rjob.reached:
                    # Record history for analysis.
                    history.append(
                        HistoryEntry(
                            rjob.batch_size,
                            rjob.power_limit,
                            rjob.energy * rjob.runtime_ratio,  # Scale the energy of this job by the runtime ratio.
                            rjob.reached,
                            rjob.time * rjob.runtime_ratio,  # Scale the runtime of this job by the runtime ratio.
                        )
                    )

                # Reached target metric (no need to retry).
                if rjob.reached:
                    if min_cost > rjob.cost:
                        if self.verbose:
                            print(
                                f"[Simulator] Minimum cost updated from {min_cost:.2f} to {rjob.cost:.2f}"
                            )
                        min_cost = rjob.cost
                        best_bs = rjob.batch_size
                # Didn't reach target metric. Need to run a retry job.
                else:
                    profiled_power = False

                    # If there are jobs in-flight, we just run additional concurrent
                    # submissions with the best known knobs.
                    if running_jobs:
                        if self.verbose:
                            print(
                                f"[Simulator] There are in-flight jobs. Use BS {best_bs}."
                            )
                        bs = best_bs
                        pl = plo.predict(job, bs)
                        assert pl, f"Power not profiled for best known BS {bs}"
                    # There are no jobs in-flight. Just submit the job normally.
                    else:
                        # Determine the knobs.
                        bs = bso.predict(job)
                        pl = plo.predict(job, bs)
                        if self.verbose:
                            print(
                                f"[Simulator] There are no in-flight jobs. Use BSO's prediction {bs}."
                            )

                        # When the batch size is first explored, we need to profile power limit.
                        if pl is None:
                            profiled_power = True
                            result = self._profile_power_limit(job, bs, eta_knob)
                            for pl, epe in result.items():
                                plo.observe(job, bs, pl, epe)
                            pl = plo.predict(job, bs)
                            assert pl

                    # Pre-compute the result of the job.
                    eta, tta, reached = self._run_job(
                        job=job,
                        batch_size=bs,
                        power_limit=pl,
                        rng=rng,
                        cost_ub=beta_knob * min_cost,
                        eta_knob=eta_knob,
                        profile_power=profiled_power,
                    )

                    # Compute the hybrid cost metric.
                    cost = zeus_cost(eta, tta, eta_knob, max_power)

                    # Create the RunningJob instance.
                    running_job = RunningJob(
                        start_time=rjob.end_time,
                        end_time=rjob.end_time + (rjob.end_time - rjob.start_time),  # Assume same runtime.
                        runtime_ratio=rjob.runtime_ratio,
                        batch_size=bs,
                        power_limit=pl,
                        reached=reached,
                        time=tta,
                        energy=eta,
                        cost=cost,
                    )
                    running_jobs.append(running_job)
                    if self.verbose:
                        print(f"[Simulator] Started retry job {running_job}")

                    # We must break from the loop that scans the running_jobs list, because
                    # the job we just submitted might actually be the next job that finishes.
                    break

        return history

    def _run_job(
        self,
        job: Job,
        batch_size: int,
        power_limit: int,
        rng: np.random.Generator,
        cost_ub: float,
        eta_knob: float,
        profile_power: bool,
    ) -> tuple[float, float, bool]:
        r"""Simulate running the job and return the energy consumed and whether it converged.

        This method will randomly choose one of the possible training "paths". Then,
        based on cost_ub, it will compute the maximum number of epochs the job can run.
        If the path's target_epoch is smaller than or equal to the maximum number of
        epochs, the cost incurred until target_epoch is returned. Otherwise, the cost
        incurred until the maximum number of epochs is returned.

        It is important to note that the job may never run when the first epoch's cost
        is already expected to exceed the cost upper bound. In such a case, the returned
        time and energy consumptions will be zero. This case must be treated separately
        in the calling code.

        If profile_power is True, the first epoch will JIT-profile power limits coarsely
        by dividing the epoch evenly into len(available_power_limits) pieces. Thus the
        first epoch's energy and time consumption will be slightly adjusted.

        Args:
            job: Job spec to run.
            batch_size: Batch size to use.
            power_limit: Power limit to use. Regardless of whether this run of this
                batch size requires power profiling, the simulator will input the
                optimal power limit for the batch size. The first epoch energy
                consumption from power profiling is adjusted in the latter half of
                this method based on the profile_power flag.
            rng: Random number generator used to sample one training path.
            cost_ub: Cost upper bound. The job is terminated when the next epoch is
                going to exceed the cost upper bound.
            eta_knob: $\eta$ used in the hybrid cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$
            profile_power: Whether this run of the job should profile power during the
                first epoch.

        Returns:
            Tuple of energy consumption, time consumption, and whether the job reached
            the target metric.
        """
        # df is filtered with job spec, BS, and PL. We sample one possible training path.
        # power_df is filtered with job spec and BS. We use this to compute the energy
        # consumption of profiling power during the first epoch.
        df = job.filter_df(self.df)
        power_df = df.loc[df.batch_size == batch_size]
        df = power_df.loc[df.power_limit == power_limit]
        path = df.sample(n=1, random_state=rng)

        # Max number of epochs is bound by either the cost upper bound or the
        # user-specified max_epochs, whichever is smaller.
        if cost_ub == np.inf:
            # cost_ub is infinity in two cases:
            # 1. The simulator has never observed any cost value in the early part of simulation.
            # 2. We're simulating with no early stopping, i.e. beta_knob is infinity.
            max_epochs = job.max_epochs
            if self.verbose:
                print(f"[run job] Cost UB is inf. {max_epochs=}")
        else:
            # Stop right before the first epoch when cost will cross the upper bound.
            cost_per_epoch = (
                eta_knob * path.energy_per_epoch.item()
                + (1 - eta_knob)
                * power_df.power_limit.max().item()
                * path.time_per_epoch.item()
            )
            max_epochs = min(cost_ub // cost_per_epoch, job.max_epochs)
            if self.verbose:
                print(f"[run job] {cost_ub=}")
                print(f"[run job] {cost_per_epoch=}")
                print(f"[run job] {max_epochs=}")

        # The job virtually never ran. Time and Energy being zero will be treated specially outside.
        # If the min_cost is so low, this might even prevent this BS from running at all.
        if max_epochs == 0:
            print(
                f"[run job] {job} cannot run even one epoch without exceeding the cost UB."
                f" BS {batch_size}, PL {power_limit}, {eta_knob=}"
            )
            return 0.0, 0.0, False

        def compute_energy_and_time(
            num_epochs: int, profile_power: bool
        ) -> tuple[float, float]:
            """Compute the energy and time consumed for running the job for num_epochs."""
            # This is the first run of this batch size, and we need to profile power
            # during the first epoch.
            if profile_power:
                # Note that power_df holds rows with all power limits. Evenly splitting
                # the epoch into N slices and running each slice with each power limit
                # consumes (1/N) * e_100 + (1/N) * e_125 + ... + (1/N) * e_250.
                # Also there are all runs 1, 2, ... included, but power info is actually
                # completely duplicated across different runs in the DataFrame.
                # Thus, taking the mean across the entire power_df gets us what we want.
                energy_first_epoch = power_df.energy_per_epoch.mean().item()
                energy_from_second_epoch = path.energy_per_epoch.item() * (
                    num_epochs - 1
                )
                energy_consumption = energy_first_epoch + energy_from_second_epoch
                time_first_epoch = power_df.time_per_epoch.mean().item()
                time_from_second_epoch = path.time_per_epoch.item() * (num_epochs - 1)
                time_consumption = time_first_epoch + time_from_second_epoch
            # Just run num_epochs with the given power limit. Simple.
            else:
                energy_consumption = path.energy_per_epoch.item() * num_epochs
                time_consumption = path.time_per_epoch.item() * num_epochs
            return energy_consumption, time_consumption

        # Job reached target metric.
        target_epoch = path.target_epoch.item()
        if path.target_epoch.notnull().item() and target_epoch <= max_epochs:
            eta, tta = compute_energy_and_time(target_epoch, profile_power)
            if self.verbose:
                print(
                    f"[run job] {job} @ {batch_size},{power_limit}W{' prof' if profile_power else ''} "
                    f"=> \033[31mReached in {int(target_epoch)} epochs, "
                    f"TTA {tta:.2f} seconds, ETA {eta:.2f}\033[0m"
                )
            return eta, tta, True

        # Job failed to reach the target metric.
        energy_consumption, time_consumption = compute_energy_and_time(
            max_epochs, profile_power
        )
        if self.verbose:
            print(
                f"[run job] {job} @ {batch_size},{power_limit}W{' prof' if profile_power else ''} "
                f"=> \033[31mFailed (stopped after {int(max_epochs)} epochs), "
                f"TTA {time_consumption:.2f} seconds, ETA {energy_consumption:.2f}\033[0m"
            )
        return energy_consumption, time_consumption, False

    def _profile_power_limit(
        self, job: Job, batch_size: int, eta_knob: float
    ) -> dict[int, float]:
        """Simulate running the job and profiling the power limit.

        Returns:
            Dictionary mapping PL to its epoch cost (Equation 7). PL is inserted
            in high to low order.
        """
        # Filter by job spec and BS.
        df = job.filter_df(self.df)
        df = df.loc[(df.batch_size == batch_size)]

        # Compute the epoch cost of each power limit (Equation 7).
        max_pl = df.power_limit.max().item()
        df = df.groupby(["power_limit"], as_index=False).mean(numeric_only=True)
        df["epoch_cost"] = (
            eta_knob * df["average_power"] + (1 - eta_knob) * max_pl
        ) * df["time_per_epoch"]

        # We'll be profiling energy from larger to smaller power limits.
        df = df.sort_values(by="power_limit", ascending=False)
        result = {rec.power_limit: rec.epoch_cost for rec in df.to_records(index=False)}
        if self.verbose:
            print(f"[PL profile] {job} @ {batch_size} => PL = {min(result, key=result.get)}W")  # type: ignore
        return result

    def _profile_batch_size_range(self, job: Job) -> list[int]:
        """Simulate profiling the available batch size range of the job.

        Returns:
            A list of feasible batch sizes.
        """
        df = self.df
        # Do not filter by target_metric here since we do not want to constrain
        # the feasible batch size range to only those that reached the target metric.
        df = df.loc[
            (df.dataset == job.dataset)
            & (df.network == job.network)
            & (df.optimizer == job.optimizer)
        ]
        result = sorted(list(df.batch_size.unique()))
        if self.verbose:
            print(f"[BS profile] {job} => BS = {result}")
        return result
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/simulate.py
simulate.py
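# --- Editor's example (not part of the package source) ---
# A minimal sketch of driving the Simulator. The trace file names are
# hypothetical, and the CSVs are assumed to carry the columns the simulator
# reads (dataset, network, optimizer, target_metric, batch_size, power_limit,
# target_epoch, time_per_epoch, average_power, ...). The two optimizer classes
# are the concrete policies defined in zeus/policy/optimizer.py further below.
from zeus.job import Job
from zeus.policy.optimizer import JITPowerLimitOptimizer, PruningGTSBatchSizeOptimizer
from zeus.simulate import Simulator

job = Job("cifar100", "shufflenetv2", "adam", 0.60, 100, default_bs=128, default_lr=0.001)
simulator = Simulator(
    summary_train="train_trace.csv",  # Hypothetical path.
    summary_power="power_trace.csv",  # Hypothetical path.
    batch_size_optimizer=PruningGTSBatchSizeOptimizer(verbose=False),
    power_limit_optimizer=JITPowerLimitOptimizer(verbose=False),
)
# Simulate 50 recurrences; early-stop a run once its cost exceeds 2x the best
# cost seen so far (beta_knob), weighting time and energy evenly (eta_knob).
history = simulator.simulate_one_job(job, num_recurrence=50, beta_knob=2.0, eta_knob=0.5)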
from __future__ import annotations


class Callback:
    """Base class for callbacks."""

    def on_train_begin(self) -> None:
        """Called at the beginning of training."""

    def on_train_end(self) -> None:
        """Called at the end of training."""

    def on_epoch_begin(self) -> None:
        """Called at the beginning of each epoch."""

    def on_epoch_end(self) -> None:
        """Called at the end of each epoch."""

    def on_step_begin(self) -> None:
        """Called at the beginning of each step."""

    def on_step_end(self) -> None:
        """Called at the end of each step."""

    def on_evaluate(self, metric: float) -> None:
        """Called after evaluating the model."""


class CallbackSet(Callback):
    """A set of callbacks."""

    def __init__(self, callbacks: list[Callback]) -> None:
        """Initialize the callback set."""
        self.callbacks = callbacks

    def on_train_begin(self) -> None:
        """Called at the beginning of training."""
        for callback in self.callbacks:
            callback.on_train_begin()

    def on_train_end(self) -> None:
        """Called at the end of training."""
        for callback in self.callbacks:
            callback.on_train_end()

    def on_epoch_begin(self) -> None:
        """Called at the beginning of each epoch."""
        for callback in self.callbacks:
            callback.on_epoch_begin()

    def on_epoch_end(self) -> None:
        """Called at the end of each epoch."""
        for callback in self.callbacks:
            callback.on_epoch_end()

    def on_step_begin(self) -> None:
        """Called at the beginning of each step."""
        for callback in self.callbacks:
            callback.on_step_begin()

    def on_step_end(self) -> None:
        """Called at the end of each step."""
        for callback in self.callbacks:
            callback.on_step_end()

    def on_evaluate(self, metric: float) -> None:
        """Called after evaluating the model."""
        for callback in self.callbacks:
            callback.on_evaluate(metric)
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/callback.py
callback.py
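# --- Editor's example (not part of the package source) ---
# Callbacks override only the hooks they need; the base class makes every hook
# a no-op. A small illustrative callback that counts steps per epoch:
from zeus.callback import Callback, CallbackSet

class StepCounter(Callback):
    """Counts training steps within the current epoch."""

    def __init__(self) -> None:
        self.steps = 0

    def on_epoch_begin(self) -> None:
        self.steps = 0

    def on_step_end(self) -> None:
        self.steps += 1

# CallbackSet fans each hook out to every callback it holds.
callbacks = CallbackSet([StepCounter()])
callbacks.on_epoch_begin()
callbacks.on_step_end()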
from __future__ import annotations

import atexit
from pathlib import Path
from abc import ABC, abstractmethod

import pynvml
from pydantic import BaseModel, PositiveInt, PositiveFloat

from zeus.callback import Callback
from zeus.monitor import ZeusMonitor
from zeus.util.logging import get_logger
from zeus.util.metric import zeus_cost


class OptimumSelector(ABC):
    """Base class for optimum power limit selectors."""

    @abstractmethod
    def select(self, measurements: list[PowerLimitMeasurement]) -> int:
        """Select the optimal power limit (W) from measurements."""


class Energy(OptimumSelector):
    """Selects the power limit that minimizes energy consumption."""

    def select(self, measurements: list[PowerLimitMeasurement]) -> int:
        """Select the optimal power limit (W) from measurements."""
        return min(measurements, key=lambda x: x.energy).power_limit


class Time(OptimumSelector):
    """Selects the power limit that minimizes training time.

    This may not necessarily choose the maximum power limit, as time profiling
    results can be slightly noisy. However, we believe that's actually better,
    because it means that training time is very similar among the higher power
    limits, while the lower power limit consumes less power.
    """

    def select(self, measurements: list[PowerLimitMeasurement]) -> int:
        """Select the optimal power limit (W) from measurements."""
        return min(measurements, key=lambda x: x.time).power_limit


class ZeusCost(OptimumSelector):
    r"""Selects the power limit that minimizes a linear Zeus time-energy cost function.

    Cost function is $C = \eta \cdot Energy + MaxPower \cdot (1 - \eta) \cdot Time$.
    """

    def __init__(self, eta_knob: float, world_size: int = 1) -> None:
        r"""Initialize the selector.

        Args:
            eta_knob: The $0 \le \eta \le 1$ knob for the Zeus time-energy cost function.
            world_size: The number of GPUs in the training job. Defaults to 1.
        """
        if eta_knob < 0 or eta_knob > 1:
            raise ValueError("eta_knob must be between 0 and 1, inclusive both sides.")
        if world_size < 1:
            raise ValueError("world_size must be greater than or equal to 1.")

        self.eta_knob = eta_knob
        self.world_size = world_size

    def select(self, measurements: list[PowerLimitMeasurement]) -> int:
        """Select the optimal power limit (W) from measurements."""
        max_power = (
            max(measurement.power_limit for measurement in measurements)
            * self.world_size
        )
        zeus_cost_map = {
            measurement.power_limit: zeus_cost(
                energy=measurement.energy,
                time=measurement.time,
                eta_knob=self.eta_knob,
                max_power=max_power,
            )
            for measurement in measurements
        }
        return min(zeus_cost_map, key=lambda x: zeus_cost_map[x])


class MaxSlowdownConstraint(OptimumSelector):
    """Selects the minimum power limit that does not slow down training by more than the given factor."""

    def __init__(self, factor: float) -> None:
        """Initialize the selector.

        Args:
            factor: The maximum allowed slowdown factor. Greater than or equal to 1.0.
        """
        if factor < 1.0:
            raise ValueError(
                f"max_slowdown_factor must be greater than or equal to 1.0. Got {factor}.",
            )

        self.factor = factor

    def select(self, measurements: list[PowerLimitMeasurement]) -> int:
        """Select the optimal power limit (W) from measurements."""
        feasible_power_limits = []
        max_power = max(measurement.power_limit for measurement in measurements)
        shortest_time = next(
            measurement.time
            for measurement in measurements
            if measurement.power_limit == max_power
        )
        for measurement in measurements:
            if measurement.time <= self.factor * shortest_time:
                feasible_power_limits.append(measurement.power_limit)
        return min(feasible_power_limits)


class Ready(BaseModel):
    """State for when we are ready to start measuring the next power limit.

    Initial state of the state machine if no previous profiling results were given.

    `Ready` -> `Warmup` after the `steps`'th `on_step_begin`.
    """

    next_power_limit: PositiveInt
    steps: PositiveInt


class Warmup(BaseModel):
    """State for when we are warming up for a power limit.

    `Warmup` -> `Profiling` on the `steps`'th `on_step_begin`.
    `Warmup` -> `Ready` on `on_epoch_end` before the `steps`'th `on_step_begin`.
    """

    current_power_limit: PositiveInt
    steps: PositiveInt


class Profiling(BaseModel):
    """State for when we are profiling a power limit.

    `Profiling` -> `Warmup` after the `steps`'th `on_step_begin` and there are
    still power limits left to profile.
    `Profiling` -> `Done` after the `steps`'th `on_step_begin` and there are no
    more power limits left to profile.
    `Profiling` -> `Ready` on `on_epoch_end` before the `steps`'th `on_step_begin`.
    """

    current_power_limit: PositiveInt
    steps: PositiveInt


class Done(BaseModel):
    """State for when we are done profiling all power limits.

    Initial state of the state machine if previous profiling results were given.
    Final state of the state machine in any case.
    """

    optimal_power_limit: PositiveInt


class PowerLimitMeasurement(BaseModel):
    """POD for GPU energy and time measurements for one power limit (W)."""

    power_limit: PositiveInt  # In Watts.
    energy: PositiveFloat
    time: PositiveFloat


class _PowerLimitMeasurementList(BaseModel):
    """Proxy class to save and load a list of `PowerLimitMeasurement`s."""

    measurements: list[PowerLimitMeasurement]


class GlobalPowerLimitOptimizer(Callback):
    """Optimizer for the power limit knob.

    This optimizer uses the JIT profiling log to determine the optimal power limit.
    """

    def __init__(
        self,
        monitor: ZeusMonitor,
        optimum_selector: OptimumSelector | None = None,
        wait_steps: int = 1,
        warmup_steps: int = 10,
        profile_steps: int = 40,
        pl_step: int = 25,
        profile_path: str | Path | None = None,
    ) -> None:
        r"""Initialize the optimizer.

        GPU indices to profile and optimize for are taken from `monitor.gpu_indices`.

        Args:
            monitor: `ZeusMonitor` instance used to profile GPU time and energy consumption.
            optimum_selector: The optimum selector to use. If not given, use `ZeusCost`
                with $\eta$ = 0.5.
            wait_steps: Number of steps to pass by before doing anything at the beginning.
                Useful if you have something like `torch.backends.cudnn.benchmark=True`,
                because the first iteration won't be representative of the rest of the
                iterations.
            warmup_steps: Number of warmup iterations for each power limit.
            profile_steps: Number of profile iterations for each power limit.
            pl_step: The stride between power limits to explore, in units of Watts.
            profile_path: If the path points to an existing file, load the profile from
                the file and do not run any profiling. If the path points to a
                non-existing file, profile and save the profile to the file. If `None`,
                do not save or load any profile.
        """
        # Sanity checks.
        if wait_steps < 0:
            raise ValueError("wait_steps must be non-negative.")
        if warmup_steps < 0:
            raise ValueError("warmup_steps must be non-negative.")
        if profile_steps <= 0:
            raise ValueError("profile_steps must be positive.")
        if pl_step <= 0:
            raise ValueError("pl_step must be positive.")

        self.monitor = monitor
        self.optimum_selector = optimum_selector or ZeusCost(
            eta_knob=0.5,
            world_size=len(monitor.gpu_indices),
        )
        self.warmup_steps = warmup_steps
        self.profile_steps = profile_steps
        self.pl_step = pl_step * 1000  # Internally, we use milliWatts.
        self.profile_path = (
            Path(profile_path) if isinstance(profile_path, str) else profile_path
        )

        # Setup logging.
        self.logger = get_logger(type(self).__name__)

        # Set the range of power limits to explore.
        # Assert that supported power limits ranges are uniform across GPUs.
        pynvml.nvmlInit()
        pls = []
        self.handles = []
        for index in monitor.nvml_gpu_indices:
            device = pynvml.nvmlDeviceGetHandleByIndex(index)
            self.handles.append(device)
            pls.append(pynvml.nvmlDeviceGetPowerManagementLimitConstraints(device))
        if not all(pls[0] == pl for pl in pls):
            raise ValueError("Power limits ranges are not uniform across GPUs.")
        self.power_limits = list(
            range(pls[0][1], pls[0][0] - self.pl_step, -self.pl_step)
        )

        # Turn on persistence mode and set to the highest power limit.
        try:
            for handle in self.handles:
                pynvml.nvmlDeviceSetPersistenceMode(handle, pynvml.NVML_FEATURE_ENABLED)
        except pynvml.NVMLError_NoPermission:  # type: ignore
            raise RuntimeError(
                "SYS_ADMIN capability is required to modify GPU power limits. "
                "Using --cap-add SYS_ADMIN when running the Docker container "
                "is the easiest way to do this."
            ) from None
        self.current_power_limit = 0

        # Store `Measurement` objects in a list, one for each power limit.
        self.measurements: list[PowerLimitMeasurement] = []

        # State for the profiler state machine.
        self.state: Ready | Warmup | Profiling | Done

        # Initialize JIT profiling states.
        if self.profile_path is None:
            self.logger.info("JIT profiling enabled.")
            self.logger.info("Will wait %d step(s) before profiling.", wait_steps)
            self.state = Ready(
                next_power_limit=self.power_limits[0], steps=wait_steps + 1
            )
            self.logger.info("Set power limit to the maximum before starting.")
            self._set_power_limit(max(self.power_limits))
        elif not self.profile_path.exists():
            self.logger.info(
                "JIT Profiling enabled. Profile will be saved to '%s'.",
                str(self.profile_path),
            )
            self.logger.info("Will wait %d step(s) before profiling.", wait_steps)
            self.state = Ready(
                next_power_limit=self.power_limits[0], steps=wait_steps + 1
            )
            self.logger.info("Set power limit to the maximum before starting.")
            self._set_power_limit(max(self.power_limits))
        else:
            self.measurements = _PowerLimitMeasurementList.model_validate_json(
                open(self.profile_path).read(),
                strict=True,
            ).measurements
            self.logger.info(
                "Loaded previous profiling results from '%s'.", str(self.profile_path)
            )
            optimal_power_limit = self._compute_optimal_power_limit()
            self.logger.info(
                "Optimal power limit is %d W.", optimal_power_limit // 1000
            )
            self.state = Done(optimal_power_limit=optimal_power_limit)
            self._set_power_limit(self.state.optimal_power_limit)

        # Restore all GPUs back to their maximum power limit on exit.
        atexit.register(lambda: self._set_power_limit(max(self.power_limits)))

    def on_epoch_end(self) -> None:
        """Mark the end of a training epoch."""
        if isinstance(self.state, Ready):
            pass
        elif isinstance(self.state, (Warmup, Profiling)):
            # Warmup/Profiling stage interrupted by the end of an epoch.
            self.logger.info(
                "%s phase for %d W interrupted by the end of a training epoch.",
                type(self.state).__name__,
                self.state.current_power_limit // 1000,
            )
            if isinstance(self.state, Profiling):
                self.monitor.end_window(
                    f"__GlobalPowerLimitOptimizer_{self.state.current_power_limit // 1000}",
                    cancel=True,
                )
            self.state = Ready(next_power_limit=self.state.current_power_limit, steps=1)
            self._set_power_limit(max(self.power_limits))
        elif isinstance(self.state, Done):
            pass

    def on_step_begin(self) -> None:
        """Mark the beginning of a training step."""
        if isinstance(self.state, Ready):
            self.state.steps -= 1
            if self.state.steps == 0:
                self.logger.info(
                    "Starting warmup for power limit %d W.",
                    self.state.next_power_limit // 1000,
                )
                self._set_power_limit(self.state.next_power_limit)
                self.state = Warmup(
                    current_power_limit=self.state.next_power_limit,
                    steps=self.warmup_steps,
                )
        elif isinstance(self.state, Warmup):
            self.state.steps -= 1
            if self.state.steps == 0:
                self.logger.info(
                    "Starting actual profiling for power limit %d W.",
                    self.state.current_power_limit // 1000,
                )
                self.state = Profiling(
                    current_power_limit=self.state.current_power_limit,
                    steps=self.profile_steps,
                )
                self.monitor.begin_window(
                    f"__GlobalPowerLimitOptimizer_{self.state.current_power_limit // 1000}",
                )
        elif isinstance(self.state, Profiling):
            self.state.steps -= 1
            if self.state.steps == 0:
                measurement = self.monitor.end_window(
                    f"__GlobalPowerLimitOptimizer_{self.state.current_power_limit // 1000}",
                )
                self.logger.info(
                    "Finished profiling for power limit %d W.",
                    self.state.current_power_limit // 1000,
                )
                self.measurements.append(
                    PowerLimitMeasurement(
                        power_limit=self.state.current_power_limit // 1000,
                        energy=measurement.total_energy,
                        time=measurement.time,
                    )
                )
                # If we're done profiling all power limits, compute the optimal
                # power limit and transition to the Done state. Otherwise, move
                # on to the Warmup phase for the next power limit.
                current_power_limit_index = self.power_limits.index(
                    self.state.current_power_limit
                )
                if current_power_limit_index == len(self.power_limits) - 1:
                    self.state = Done(
                        optimal_power_limit=self._compute_optimal_power_limit(),
                    )
                    self._set_power_limit(self.state.optimal_power_limit)
                    self._save_profile()
                else:
                    next_power_limit = self.power_limits[current_power_limit_index + 1]
                    self.logger.info(
                        "Starting warmup for power limit %d W.",
                        next_power_limit // 1000,
                    )
                    self._set_power_limit(next_power_limit)
                    self.state = Warmup(
                        current_power_limit=next_power_limit,
                        steps=self.warmup_steps,
                    )
        elif isinstance(self.state, Done):
            pass

    def _set_power_limit(self, power_limit: int) -> None:
        """Set the power limit for all GPUs.

        Args:
            power_limit: The power limit to set, in milliWatts.
        """
        self.logger.info("Setting power limit to %d W.", power_limit // 1000)
        if self.current_power_limit == power_limit:
            return
        for handle in self.handles:
            pynvml.nvmlDeviceSetPowerManagementLimit(handle, power_limit)
        self.current_power_limit = power_limit

    def _compute_optimal_power_limit(self) -> int:
        """Compute the optimal power limit in milliWatts."""
        optimal_power_limit = self.optimum_selector.select(self.measurements) * 1000
        self.logger.info("Optimal power limit is %d W.", optimal_power_limit // 1000)
        return optimal_power_limit

    def _save_profile(self) -> None:
        """Save JIT profiling results to a JSON file."""
        if self.profile_path is None:
            return
        assert isinstance(self.state, Done)
        with self.profile_path.open("w", encoding="utf-8") as f:
            f.write(
                _PowerLimitMeasurementList(
                    measurements=self.measurements
                ).model_dump_json(indent=4)
            )
        self.logger.info("JIT profiling results saved to '%s'.", str(self.profile_path))
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/optimizer/power_limit.py
power_limit.py
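# --- Editor's example (not part of the package source) ---
# A sketch of wiring GlobalPowerLimitOptimizer into a training loop. The
# optimizer construction and the callback hooks come from this file; the loop
# plumbing (dataloader, train_one_step) is a hypothetical stand-in for real
# training code, and "power_profile.json" is a made-up path.
from zeus.monitor import ZeusMonitor
from zeus.optimizer.power_limit import GlobalPowerLimitOptimizer, MaxSlowdownConstraint

monitor = ZeusMonitor(gpu_indices=[0])
plo = GlobalPowerLimitOptimizer(
    monitor,
    # Pick the lowest power limit within 10% of the fastest measured time.
    optimum_selector=MaxSlowdownConstraint(factor=1.1),
    warmup_steps=10,
    profile_steps=40,
    profile_path="power_profile.json",  # Profile is saved here and reused on the next run.
)

dataloader = range(1000)  # Stand-in for a real DataLoader.

def train_one_step(batch) -> None:
    """Stand-in for one forward/backward/optimizer step."""

for epoch in range(10):
    for batch in dataloader:
        plo.on_step_begin()  # Drives Ready -> Warmup -> Profiling -> Done.
        train_one_step(batch)
    plo.on_epoch_end()  # Restarts a Warmup/Profiling window interrupted by the epoch boundary.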
from __future__ import annotations

from collections import defaultdict
from typing import Generator

import numpy as np

from zeus.job import Job
from zeus.policy.interface import BatchSizeOptimizer, PowerLimitOptimizer
from zeus.policy.mab import GaussianTS


class GTSBatchSizeOptimizer(BatchSizeOptimizer):
    """One Gaussian Thompson Sampling MAB for each job."""

    # ruff: noqa: D417
    def __init__(
        self,
        learn_reward_precision: bool,
        reward_precision: float = 0.0,
        prior_mean: float = 0.0,
        prior_precision: float = 0.0,
        num_exploration: int = 1,
        seed: int = 123456,
        verbose: bool = True,
    ) -> None:
        """Initialize the optimizer.

        Refer to the constructor of [`GaussianTS`][zeus.policy.mab.GaussianTS]
        for descriptions of other arguments.

        Args:
            learn_reward_precision: Whether to learn the reward precision of
                each arm as we accumulate observations.
        """
        self.learn_reward_precision = learn_reward_precision
        self.reward_precision = reward_precision
        self.prior_mean = prior_mean
        self.prior_precision = prior_precision
        self.num_exploration = num_exploration
        self.seed = seed
        self.verbose = verbose

        # One MAB for each job.
        self.mabs: dict[Job, GaussianTS] = {}
        # Track the batch size range for each job.
        self.batch_sizes: dict[Job, list[int]] = {}
        # Observation history (batch size, reward) for each job.
        self.history: dict[Job, defaultdict[int, list[float]]] = {}

    @property
    def name(self) -> str:
        """Name of the batch size optimizer."""
        return "GaussianTS BSO"

    def register_job(self, job: Job, batch_sizes: list[int]) -> None:
        """Instantiate a new GaussianTS MAB for the new job."""
        # We do not want to reset the state related to this job if
        # anything already exists.
        if job in self.mabs:
            return
        self.mabs[job] = GaussianTS(
            arms=batch_sizes,
            reward_precision=self.reward_precision,
            prior_mean=self.prior_mean,
            prior_precision=self.prior_precision,
            num_exploration=self.num_exploration,
            seed=self.seed,
            verbose=self.verbose,
        )
        self.batch_sizes[job] = batch_sizes
        self.history[job] = defaultdict(list)
        if self.verbose:
            self._log(f"Registered {job}")

    def predict(self, job: Job) -> int:
        """Return the batch size to use for the job."""
        if self.verbose:
            self._log(f"Prediction for {job}")
        pred = self.mabs[job].predict()
        if self.verbose:
            self._log(f"{job} -> \033[31mBS = {pred}\033[0m")
        return pred

    def observe(
        self, job: Job, batch_size: int, cost: float, converged: bool | None = None
    ) -> None:
        """Learn from the cost of using the given batch size for the job."""
        if batch_size not in self.batch_sizes[job]:
            raise ValueError(f"Unknown batch size '{batch_size}' for {job}.")

        # No normalization needed since we learn a separate bandit for each job.
        reward = -cost

        # Add observation to history.
        self.history[job][batch_size].append(reward)

        # When we're not learning the reward precision, everything is simple.
        # We can just call `fit` with `reset=False` on the job's MAB instance.
        if not self.learn_reward_precision:
            self.mabs[job].fit([batch_size], [reward], reset=False)
            if self.verbose:
                self._log(f"{job} @ {batch_size}: reward = {reward:.2f}")
        # When we're learning the reward precision, we need to
        # 1. re-compute the precision of this arm based on the history,
        # 2. update the arm's reward precision,
        # 3. and `fit` the new MAB instance on all past data.
        else:
            arm_rewards = np.array(self.history[job][batch_size])
            variance = np.var(arm_rewards)
            # When there is only one observation for the arm, the variance is zero.
            # NOTE: We might still want to have a pre-determined reward precision here
            #       because sampling from an infinite precision Gaussian distribution
            #       always returns the mean (the observation), and it will hamper
            #       exploration in the early stage.
            precision = np.inf if variance == 0.0 else np.reciprocal(variance)
            mab = self.mabs[job]
            mab.arm_reward_prec[batch_size] = precision
            mab.fit_arm(batch_size, arm_rewards, reset=True)
            self.mabs[job] = mab
            if self.verbose:
                arm_rewards_repr = ", ".join([f"{r:.2f}" for r in arm_rewards])
                self._log(
                    f"{job} @ {batch_size}: "
                    f"arm_rewards = [{arm_rewards_repr}], reward_prec = {precision}"
                )


class PruningExploreManager:
    """Helper class that generates batch sizes to explore and prune."""

    def __init__(
        self,
        batch_sizes: list[int],
        default: int,
        num_pruning_rounds: int = 2,
    ) -> None:
        """Initialize the object.

        Args:
            batch_sizes: The initial set of batch sizes to prune from.
            default: The default batch size (b0) to begin exploration from.
            num_pruning_rounds: How many rounds to run pruning.
        """
        # Sanity checks.
        if default not in batch_sizes:
            raise ValueError(f"Default batch size {default} not in {batch_sizes}.")

        # Save arguments.
        self.batch_sizes = batch_sizes
        self.default = default
        self.num_pruning_rounds = num_pruning_rounds

        # State
        self.expecting = default

        # Generator that returns batch sizes.
        self.gen = self._exploration_engine()

    def _exploration_engine(
        self,
    ) -> Generator[int | None, tuple[int, float, bool], list[int]]:
        """Drive pruning exploration.

        Yields the batch size to be explored.
        The caller should `send` a tuple of (explored batch size, cost, whether reached).
        As a safety measure, the explored batch size must match the most recently
        yielded batch size; otherwise a `RuntimeError` is raised.
        Finally, when exploration is over, returns a sorted list of batch sizes that
        survived pruning.
        """
        for _ in range(self.num_pruning_rounds):
            # A list of batch sizes that reached the target metric.
            good: list[int] = []

            # We first explore downwards from the default batch size, and then go upwards.
            idx = self.batch_sizes.index(self.default)
            down = sorted(self.batch_sizes[: idx + 1], reverse=True)
            up = sorted(self.batch_sizes[idx + 1 :])

            # We track the best cost because the default batch size is updated to the
            # batch size that performed the best.
            best_cost = np.inf

            for bs_list in [down, up]:
                for bs in bs_list:
                    # We tell the outside world to explore `bs`, and we expect the
                    # outside world to give us back the cost of that `bs`.
                    self.expecting = bs
                    batch_size, cost, reached = yield bs
                    if self.expecting != batch_size:
                        raise RuntimeError(
                            f"PruningExploreManager: {self.expecting=}, {batch_size=}"
                        )
                    self.expecting = 0

                    # An empty `yield` to not proceed to the next batch size when the
                    # caller `send`s in the results.
                    yield

                    # Only batch sizes that reached the target metric are good.
                    if reached:
                        if best_cost > cost:
                            best_cost = cost
                            self.default = bs
                        good.append(bs)
                    # If the batch size did not reach the target metric, `break`ing here
                    # will allow us to move on to either the next direction of exploration
                    # (upwards) or end this round of pruning exploration.
                    else:
                        break

            self.expecting = 0
            self.batch_sizes = sorted(good)

        return sorted(self.batch_sizes)

    def next_batch_size(self) -> int:
        """Return the next batch size to explore.

        Raises `StopIteration` when the pruning exploration phase is over.
        The exception instance contains the final set of batch sizes to consider.
        Access it through `exception.value`.
        """
        batch_size = next(self.gen)
        assert batch_size is not None, "Call order may have been wrong."
        return batch_size

    def report_batch_size_result(
        self, batch_size: int, cost: float, reached: bool
    ) -> None:
        """Report whether the previous batch size reached the target metric.

        Args:
            batch_size: The batch size which this cost observation is from.
            cost: The energy-time cost of running the job with this batch size.
            reached: Whether the job reached the target metric.
        """
        none = self.gen.send((batch_size, cost, reached))
        assert none is None, "Call order may have been wrong."


class PruningGTSBatchSizeOptimizer(BatchSizeOptimizer):
    """One Gaussian Thompson Sampling MAB for each job with double pruning exploration."""

    def __init__(
        self,
        prior_mean: float = 0.0,
        prior_precision: float = 0.0,
        window_size: int = 0,
        concurrency: bool = False,
        seed: int = 123456,
        verbose: bool = True,
    ) -> None:
        """Initialize the optimizer.

        Refer to the constructor of [`GaussianTS`][zeus.policy.mab.GaussianTS]
        for descriptions of other arguments.

        Args:
            window_size: Size of the window for the MAB (for drift handling).
            concurrency: Whether to support concurrent job submissions.
        """
        self.prior_mean = prior_mean
        self.prior_precision = prior_precision
        self.window_size = window_size
        self.concurrency = concurrency
        self.seed = seed
        self.verbose = verbose

        # One MAB for each job.
        self.mabs: dict[Job, GaussianTS] = {}
        # One PruningExploreManager for each job.
        self.exp_manager: dict[Job, PruningExploreManager] = {}
        # Observation history (batch size, reward) for each job.
        self.history: dict[Job, list[tuple[int, float]]] = {}

    @property
    def name(self) -> str:
        """Name of the batch size optimizer."""
        return "Pruning GaussianTS BSO"

    def register_job(self, job: Job, batch_sizes: list[int]) -> None:
        """Register the job."""
        # Sanity checks.
        if job.default_bs is None:
            raise ValueError(f"Default BS not specified for {job}.")
        if not batch_sizes:
            raise ValueError(f"Batch size list for {job} is empty.")

        # Set internal states.
        self.exp_manager[job] = PruningExploreManager(
            sorted(batch_sizes), job.default_bs
        )
        self.history[job] = []
        if self.verbose:
            self._log(f"Registered {job}")

    def predict(self, job: Job) -> int:
        """Return the batch size to use for the job."""
        # Try to see if the exploration manager has something.
        try:
            batch_size = self.exp_manager[job].next_batch_size()
            if self.verbose:
                self._log(f"{job} in pruning stage -> \033[31mBS = {batch_size}\033[0m")
        except StopIteration as exp:
            # Pruning stage is over.
            if job not in self.mabs:
                self._construct_mab(job, exp.value)
            batch_size = self.mabs[job].predict()
            if self.verbose:
                self._log(
                    f"{job} in Thompson Sampling stage -> \033[31mBS = {batch_size}\033[0m"
                )

        return batch_size

    def observe(
        self, job: Job, batch_size: int, cost: float, converged: bool | None = None
    ) -> None:
        """Learn from the cost of using the given batch size for the job."""
        # Add observation to history.
        self.history[job].append((batch_size, -cost))

        # We're in Thompson Sampling stage.
        if job in self.mabs:
            # Since we're learning the reward precision, we need to
            # 1. re-compute the precision of this arm based on the reward history,
            # 2. update the arm's reward precision,
            # 3. and `fit` the new MAB instance on all the reward history.
            # Note that `arm_rewards` always has more than one entry (and hence a
            # non-zero variance) because we've been through pruning exploration.
            arm_rewards = np.array(self._get_history_for_bs(job, batch_size))
            precision = np.reciprocal(np.var(arm_rewards))
            mab = self.mabs[job]
            mab.arm_reward_prec[batch_size] = precision
            mab.fit_arm(batch_size, arm_rewards, reset=True)
            if self.verbose:
                arm_rewards_repr = ", ".join([f"{r:.2f}" for r in arm_rewards])
                self._log(
                    f"{job} @ {batch_size}: "
                    f"arm_rewards = [{arm_rewards_repr}], reward_prec = {precision}"
                )
        # We're in pruning stage.
        else:
            assert converged is not None

            # Log before we potentially error out.
            if self.verbose:
                self._log(
                    f"{job} in pruning stage, expecting BS {self.exp_manager[job].expecting}."
                    f" Current BS {batch_size} that did {'' if converged else 'not '}converge."
                )

            # If we don't support concurrency, we can just pass the results to the
            # exploration manager, and the manager will err if the order of batch sizes
            # is screwed up.
            if not self.concurrency:
                self.exp_manager[job].report_batch_size_result(
                    batch_size, cost, converged
                )
                return

            # If we are supporting concurrency, there's a subtle issue.
            # Pruning exploration demands a specific order of trying out a batch size
            # and receiving the results (cost and whether reached). This breaks in the
            # following situation, for example:
            # 1. Job with BS 32 that is part of pruning exploration starts.
            # 2. Concurrent job comes in, and we launch it with the best known BS 64.
            # 3. Job with BS 64 finishes first, and calls bso.observe with BS 64.
            # This breaks the observation order assumption of PruningExploreManager.
            # Thus we check whether the current batch size is the one expected by
            # PruningExploreManager, and then only if so, call bso.observe.
            # Otherwise, we silently insert the cost observation into the bso's history
            # (first line of this method) and don't touch the PruningExploreManager.
            if self.exp_manager[job].expecting == batch_size:
                self.exp_manager[job].report_batch_size_result(
                    batch_size, cost, converged
                )

    def _get_history_for_bs(self, job: Job, batch_size: int) -> list[float]:
        """Return the windowed history for the given job's batch size."""
        history = self.history[job]
        rewards = []
        # Collect rewards starting from the most recent ones and backwards.
        for bs, reward in reversed(history):
            if bs == batch_size:
                rewards.append(reward)
                if len(rewards) == self.window_size:
                    break
        # There's no need to return this in time order, but just in case.
        return list(reversed(rewards))

    def _construct_mab(self, job: Job, batch_sizes: list[int]) -> None:
        """When exploration is over, this method is called to construct and learn GTS."""
        # Sanity check.
        if not batch_sizes:
            raise ValueError(
                "Empty batch size set when constructing MAB. "
                "Probably all batch sizes have been pruned."
            )

        if self.verbose:
            self._log(f"Construct MAB for {job} with arms {batch_sizes}")

        mab = GaussianTS(
            arms=batch_sizes,  # The MAB only has "good" arms.
            reward_precision=0.0,
            prior_mean=self.prior_mean,
            prior_precision=self.prior_precision,
            num_exploration=2,
            seed=self.seed,
            verbose=self.verbose,
        )
        # Fit the arm for each good batch size.
        for batch_size in self.exp_manager[job].batch_sizes:
            arm_rewards = np.array(self._get_history_for_bs(job, batch_size))
            assert (
                len(arm_rewards) >= 2
            ), f"Number of observations for {batch_size} is {len(arm_rewards)}."
            mab.arm_reward_prec[batch_size] = np.reciprocal(np.var(arm_rewards))
            mab.fit_arm(batch_size, arm_rewards, reset=True)
        # Save the MAB.
        self.mabs[job] = mab


class JITPowerLimitOptimizer(PowerLimitOptimizer):
    """Returns the best power limit to use for the job & batch size."""

    def __init__(self, verbose: bool = True) -> None:
        """Initialize the object."""
        self.verbose = verbose

        self.best_pl: defaultdict[Job, dict[int, int]] = defaultdict(dict)
        self.best_cost: defaultdict[Job, dict[int, float]] = defaultdict(dict)
        self.observe_count: defaultdict[Job, defaultdict[int, int]] = defaultdict(
            lambda: defaultdict(int)
        )

    @property
    def name(self) -> str:
        """Name of the power limit optimizer."""
        return "JITPSO"

    def predict(self, job: Job, batch_size: int) -> int | None:
        """Return the best power limit for the job, or None if unknown."""
        pred = self.best_pl[job].get(batch_size)
        if self.verbose:
            self._log(
                f"{job} @ {batch_size} -> \033[31mPL = "
                f"{'needs profiling' if pred is None else str(pred) + 'W'}\033[0m"
            )
        return pred

    def observe(self, job: Job, batch_size: int, power_limit: int, cost: float) -> None:
        """Learn from the cost of using the given knobs for the job."""
        self.observe_count[job][batch_size] += 1
        prev_best_cost = self.best_cost[job].get(batch_size)
        if prev_best_cost is None or prev_best_cost > cost:
            self.best_pl[job][batch_size] = power_limit
            self.best_cost[job][batch_size] = cost
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/policy/optimizer.py
optimizer.py
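# --- Editor's example (not part of the package source) ---
# Driving PruningExploreManager by hand with made-up costs. Batch sizes that
# reach the target metric survive a pruning round; exploration goes down from
# the default batch size first, then up, for `num_pruning_rounds` rounds.
from zeus.policy.optimizer import PruningExploreManager

manager = PruningExploreManager(batch_sizes=[32, 64, 128, 256], default=128)
fake_cost = {32: 4.0, 64: 2.5, 128: 3.0, 256: 5.0}  # Hypothetical costs.
try:
    while True:
        bs = manager.next_batch_size()
        # Pretend that every batch size except 256 reaches the target metric.
        manager.report_batch_size_result(bs, fake_cost[bs], bs != 256)
except StopIteration as exp:
    surviving_batch_sizes = exp.value  # [32, 64, 128] in this example.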
from __future__ import annotations

import warnings

import numpy as np


class GaussianTS:
    """Thompson Sampling policy for Gaussian bandits.

    For each arm, the reward is modeled as a Gaussian distribution with
    known precision. The conjugate priors are also Gaussian distributions.
    """

    def __init__(
        self,
        arms: list[int],
        reward_precision: list[float] | float,
        prior_mean: float = 0.0,
        prior_precision: float = 0.0,
        num_exploration: int = 1,
        seed: int = 123456,
        verbose: bool = True,
    ) -> None:
        """Initialize the object.

        Args:
            arms: Bandit arm values to use.
            reward_precision: Precision (inverse variance) of the reward distribution.
                Pass in a list of `float`s to set the reward precision
                differently for each arm.
            prior_mean: Mean of the belief prior distribution.
            prior_precision: Precision of the belief prior distribution.
            num_exploration: How many static explorations to run when no observations
                are available.
            seed: The random seed to use.
            verbose: Whether to print out what's going on.
        """
        self.name = "GaussianTS"
        self.arms = arms
        self.prior_mean = prior_mean
        self.prior_prec = prior_precision
        self.num_exploration = num_exploration
        self.seed = seed
        self.rng = np.random.default_rng(seed)
        self.verbose = verbose

        # Set the precision of the reward distribution of each arm.
        if isinstance(reward_precision, list):
            self.arm_reward_prec = dict(zip(arms, reward_precision))
        else:
            self.arm_reward_prec = {arm: reward_precision for arm in arms}

        # Initialize the parameter distribution with the prior parameters.
        self.arm_param_mean = dict.fromkeys(arms, prior_mean)
        self.arm_param_prec = dict.fromkeys(arms, prior_precision)

        # Track how many times an arm reward has been observed.
        self.arm_num_observations = dict.fromkeys(arms, 0)

    def fit(
        self,
        decisions: list[int] | np.ndarray,
        rewards: list[float] | np.ndarray,
        reset: bool,
    ) -> None:
        """Fit the bandit on the given list of observations.

        Args:
            decisions: A list of arms chosen.
            rewards: A list of rewards that resulted from choosing the arms in `decisions`.
            reset: Whether to reset all arms.
        """
        decisions_arr = np.array(decisions)
        rewards_arr = np.array(rewards)

        # Fit all arms.
        for arm in self.arms:
            self.fit_arm(arm, rewards_arr[decisions_arr == arm], reset)

    def fit_arm(self, arm: int, rewards: np.ndarray, reset: bool) -> None:
        """Update the parameter distribution for one arm.

        Reference: <https://en.wikipedia.org/wiki/Conjugate_prior>

        Args:
            arm: Arm to fit.
            rewards: Array of rewards observed by pulling that arm.
            reset: Whether to reset the parameters of the arm before fitting.
        """
        if reset:
            self.arm_param_mean[arm] = self.prior_mean
            self.arm_param_prec[arm] = self.prior_prec
            self.arm_num_observations[arm] = 0

        if len(rewards) == 0:
            return

        # Read previous state.
        reward_prec = self.arm_reward_prec[arm]
        mean = self.arm_param_mean[arm]
        prec = self.arm_param_prec[arm]

        # Compute the parameters of the posterior distribution.
        # The reward distribution's precision is given as infinite only when we
        # have exactly one observation for the arm, s.t. sampling yields that
        # exact observation.
        if reward_prec == np.inf:
            new_prec = np.inf
            new_mean = rewards.mean()
        else:
            new_prec = prec + len(rewards) * reward_prec
            new_mean = (prec * mean + reward_prec * rewards.sum()) / new_prec

        # Update state.
        self.arm_param_mean[arm] = new_mean
        self.arm_param_prec[arm] = new_prec
        self.arm_num_observations[arm] += len(rewards)

    def predict(self) -> int:
        """Return the arm with the largest sampled expected reward."""
        # Exploration-only phase.
        # Order is random considering concurrent bandit scenarios.
        arms = np.array(self.arms)
        for arm in self.rng.choice(arms, len(arms), replace=False):
            if self.arm_num_observations[arm] < self.num_exploration:
                if self.verbose:
                    print(f"[{self.name}] Explore arm {arm}.")
                return arm

        # Thompson Sampling phase.
        expectations = self.predict_expectations()
        if self.verbose:
            print(f"[{self.name}] Sampled mean rewards:")
            for arm, sample in expectations.items():
                print(
                    f"[{self.name}] Arm {arm:4d}: mu ~ N({self.arm_param_mean[arm]:.2f}, "
                    f"{1/self.arm_param_prec[arm]:.2f}) -> {sample:.2f}"
                )
        return max(expectations, key=expectations.get)  # type: ignore

    def predict_expectations(self) -> dict[int, float]:
        """Sample the expected reward for each arm.

        Assumes that each arm has been explored at least once. Otherwise,
        a value will be sampled from the prior.

        Returns:
            A mapping from every arm to their sampled expected reward.
        """
        expectations = {}
        for arm in self.arms:
            mean = self.arm_param_mean[arm]
            prec = self.arm_param_prec[arm]
            if prec == self.prior_prec:
                warnings.warn(
                    f"predict_expectations called when arm '{arm}' is cold.",
                    stacklevel=1,
                )
            expectations[arm] = self.rng.normal(mean, np.sqrt(np.reciprocal(prec)))
        return expectations
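A minimal usage sketch of the policy above (the reward numbers are made up for illustration; only the `GaussianTS` API itself comes from this file): fit on past arm/reward observations, then let the policy pick the next arm.

policy = GaussianTS(arms=[32, 64, 128], reward_precision=1.0, verbose=False)
policy.fit(decisions=[32, 64, 128], rewards=[-1.2, -0.7, -1.5], reset=True)
arm = policy.predict()  # explores cold arms first, then samples each posterior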
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/policy/mab.py
mab.py
from __future__ import annotations

from abc import ABC, abstractmethod

from zeus.job import Job


class BatchSizeOptimizer(ABC):
    """Finds out the best batch size to use for the job."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of the batch size optimizer."""

    @abstractmethod
    def register_job(self, job: Job, batch_sizes: list[int]) -> None:
        """Prepare internal state so that it can handle the given job.

        It is assumed that the state of each [`Job`][zeus.job.Job] will be
        managed separately. Note that [`Job`][zeus.job.Job] is hashable,
        and thus can be used as dictionary keys.

        Args:
            job: New job to register.
            batch_sizes: Batch sizes to consider.
        """

    @abstractmethod
    def predict(self, job: Job) -> int:
        """Return the best batch size to use for the job.

        Args:
            job: The job to pick the best batch size for.
        """

    @abstractmethod
    def observe(
        self, job: Job, batch_size: int, cost: float, converged: bool | None = None
    ) -> None:
        """Observe the cost of using the given batch size for the job.

        Args:
            job: The job from which this cost observation resulted.
            batch_size: The batch size used for this run of the job.
            cost: The energy-time cost of running the job.
            converged: Whether the job reached its target metric. It may not have
                reached its target if the job was early stopped based on cost or
                the maximum epoch was reached. For BSOs that do not take this
                into account, `None` can be passed.
        """

    def _log(self, message: str) -> None:
        """Log message with object name."""
        print(f"[{self.name}] {message}")


class PowerLimitOptimizer(ABC):
    """Finds out the best power limit to use for the job and batch size."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of the power limit optimizer."""

    @abstractmethod
    def predict(self, job: Job, batch_size: int) -> int | None:
        """Return the best power limit for the job and batch size.

        Args:
            job: The job to pick the best power limit for.
            batch_size: The batch size chosen by the
                [`BatchSizeOptimizer`][zeus.policy.BatchSizeOptimizer] for this job.

        Returns:
            The best power limit, or `None` if profiling results via
            [`observe`][zeus.policy.interface.PowerLimitOptimizer.observe] are needed.
        """

    @abstractmethod
    def observe(self, job: Job, batch_size: int, power_limit: int, cost: float) -> None:
        """Observe the cost of using the given batch size and power limit for the job.

        Args:
            job: The job from which this cost observation resulted.
            batch_size: The batch size used for this run of the job.
            power_limit: The power limit used for this run of the job.
            cost: The cost of running the job.
        """

    def _log(self, message: str) -> None:
        """Log message with object name."""
        print(f"[{self.name}] {message}")
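A hedged sketch of how a concrete policy could plug into this interface (the class below is illustrative, not part of the package): it always proposes the smallest registered batch size and just logs observations.

class SmallestFirstBSO(BatchSizeOptimizer):
    """Toy policy: always pick the smallest registered batch size."""

    def __init__(self) -> None:
        self.batch_sizes: dict[Job, list[int]] = {}

    @property
    def name(self) -> str:
        return "SmallestFirstBSO"

    def register_job(self, job: Job, batch_sizes: list[int]) -> None:
        # `Job` is hashable, so it can key the dictionary directly.
        self.batch_sizes[job] = sorted(batch_sizes)

    def predict(self, job: Job) -> int:
        return self.batch_sizes[job][0]

    def observe(
        self, job: Job, batch_size: int, cost: float, converged: bool | None = None
    ) -> None:
        self._log(f"bs={batch_size} cost={cost:.2f} converged={converged}")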
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/policy/interface.py
interface.py
from __future__ import annotations


def zeus_cost(energy: float, time: float, eta_knob: float, max_power: int) -> float:
    """Compute Zeus's energy-time cost metric.

    Trades off ETA and TTA based on the value of `eta_knob`.
    The caller is expected to do bound checking for `eta_knob`,
    because `eta_knob` does not change frequently.

    Args:
        energy: Joules
        time: seconds
        eta_knob: Real number in [0, 1].
        max_power: The maximum power limit of the GPU.

    Returns:
        The cost of the DL training job.
    """
    return eta_knob * energy + (1 - eta_knob) * max_power * time


class ZeusCostThresholdExceededError(Exception):
    """Raised when the predicted cost of the next epoch exceeds the cost threshold.

    This exception is used for terminating all the processes when doing data
    parallel training with multiple processes, because ONLY the master process
    will predict `next_cost` and do the threshold checking. However, once the
    predicted cost exceeds the threshold, we want to terminate ALL the processes.
    Currently this is achieved by throwing an exception at the master process.
    The launching script will terminate all the processes that are still alive.

    Attributes:
        time_consumed (float): Time consumed until the current epoch.
        energy_consumed (float): Energy consumed until the current epoch.
        cost (float): Computed Zeus's energy-time cost metric until the current epoch.
        next_cost (float): Predicted Zeus's energy-time cost metric after next epoch.
        cost_thresh (float): The cost threshold.
    """

    def __init__(
        self,
        time_consumed: float,
        energy_consumed: float,
        cost: float,
        next_cost: float,
        cost_thresh: float,
    ) -> None:
        """Initialize the exception."""
        msg = (
            f"Next expected cost {next_cost:.2f} exceeds cost threshold {cost_thresh:.2f}! "
            f"Stopping. Saved training results: time={time_consumed:.2f}, "
            f"energy={energy_consumed:.2f}, cost={cost:.2f}, reached=false"
        )
        super().__init__(msg)
        self.time_consumed = time_consumed
        self.energy_consumed = energy_consumed
        self.cost = cost
        self.next_cost = next_cost
        self.cost_thresh = cost_thresh
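A small worked example of the metric (numbers are made up): with `eta_knob=0.5` and a 300 W maximum power limit, a job that consumes 90 kJ over 600 s costs 0.5 * 90000 + 0.5 * 300 * 600 = 135000.

cost = zeus_cost(energy=90_000.0, time=600.0, eta_knob=0.5, max_power=300)
assert cost == 135_000.0  # eta_knob=1.0 reduces to pure energy, 0.0 to pure time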
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/util/metric.py
metric.py
from __future__ import annotations

import os
from typing import Type, TypeVar, cast

import pynvml

T = TypeVar("T")


def get_env(name: str, valtype: Type[T], default: T | None = None) -> T:
    """Fetch an environment variable and cast it to the given type."""
    try:
        if valtype == bool:
            val = os.environ[name].lower()
            if val not in ["true", "false"]:
                raise ValueError(f"Strange boolean environment variable value '{val}'")
            return cast(T, val == "true")
        return valtype(os.environ[name])
    except KeyError:
        if default is not None:
            return default
        raise ValueError(f"Missing environment variable '{name}'") from None


def resolve_gpu_indices(
    requested_gpu_indices: list[int] | None,
) -> tuple[list[int], list[int]]:
    """Resolve GPU indices considering `CUDA_VISIBLE_DEVICES`.

    Args:
        requested_gpu_indices: A list of user-specified GPU indices. If `None`,
            assume the user wants all GPUs visible under `CUDA_VISIBLE_DEVICES`.

    Returns:
        A tuple of GPU index lists, where the former is CUDA indices under the
        illusion of `CUDA_VISIBLE_DEVICES` and the latter is the actual CUDA
        indices that NVML understands. The order of the two lists is the same.
    """
    # Initialize NVML.
    pynvml.nvmlInit()

    # Sanity check.
    if requested_gpu_indices is not None and not requested_gpu_indices:
        raise ValueError("`requested_gpu_indices` must be None or non-empty.")

    # Find the NVML GPU indices visible to CUDA, respecting `CUDA_VISIBLE_DEVICES`.
    if (cuda_visible_device := os.environ.get("CUDA_VISIBLE_DEVICES")) is not None:
        nvml_visible_indices = [int(idx) for idx in cuda_visible_device.split(",")]
    else:
        nvml_visible_indices = list(range(pynvml.nvmlDeviceGetCount()))

    # NVML GPU indices and CUDA GPU indices can be different.
    # We always use CUDA GPU indices when communicating with the outside world,
    # but when dealing with NVML, we use the NVML GPU indices.
    if requested_gpu_indices is None:
        nvml_gpu_indices = nvml_visible_indices
        cuda_gpu_indices = list(range(len(nvml_visible_indices)))
    else:
        nvml_gpu_indices = [nvml_visible_indices[idx] for idx in requested_gpu_indices]
        cuda_gpu_indices = requested_gpu_indices

    # Deinitialize NVML.
    pynvml.nvmlShutdown()

    return cuda_gpu_indices, nvml_gpu_indices
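A hedged usage sketch mirroring how ZeusDataLoader reads its configuration (the environment variable values here are illustrative):

os.environ.setdefault("ZEUS_TARGET_METRIC", "0.92")
target_metric = get_env("ZEUS_TARGET_METRIC", float)        # 0.92
logdir = get_env("ZEUS_LOG_DIR", str, default="zeus_log")   # falls back to the default
use_optimal_pl = get_env("ZEUS_USE_OPTIMAL_PL", bool, default=True)

# Map the framework-visible CUDA indices [0, 1] to the indices NVML expects.
cuda_indices, nvml_indices = resolve_gpu_indices([0, 1])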
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/util/env.py
env.py
from __future__ import annotations import json import os import pprint import subprocess from copy import deepcopy from pathlib import Path from time import localtime, sleep, strftime import numpy as np import pynvml import torch from zeus.analyze import HistoryEntry from zeus.job import Job from zeus.policy import BatchSizeOptimizer from zeus.util import zeus_cost class ZeusMaster: """Drives Zeus across multiple recurrences of a job. The main purpose of `ZeusMaster` is to launch training scripts integrated with [`ZeusDataLoader`][zeus.run.ZeusDataLoader], controlling it by setting environment variables. For the environment variables, see the [`run_job`][zeus.run.ZeusMaster.run_job] method as well as [`ZeusDataLoader`][zeus.run.ZeusDataLoader]'s class docstring. The optimal batch size is searched for and exploited using the [`BatchSizeOptimizer`][zeus.policy.BatchSizeOptimizer] object passed in through the constructor. """ def __init__( self, batch_size_optimizer: BatchSizeOptimizer, log_base: str, monitor_path: str, seed: int = 123456, observer_mode: bool = False, profile_warmup_iters: int = 10, profile_measure_iters: int = 40, ) -> None: """Initialize the master. Args: batch_size_optimizer: The user is expected to construct the [`BatchSizeOptimizer`][zeus.policy.BatchSizeOptimizer] with the desired policy and pass it into the master class. log_base: Absolute path where logs will be stored. A separate directory will be created inside, whose name is determined by the job and current time. monitor_path: Absolute path to the power monitor binary. seed: The random seed. Every invocation of the [`run`][zeus.run.ZeusMaster.run] method in this class is deterministic given the random seed, because the internal RNG states are deepcopied before servicing jobs. observer_mode: When Observer Mode is on, the maximum power limit is always used instead of the optimal power limit. However, internal time and energy accounting will be done as if the cost-optimal power limit is used. profile_warmup_iters: Number of iterations to warm up on a specific power limit. This is passed to the [`ZeusDataLoader`][zeus.run.ZeusDataLoader]. profile_measure_iters: Number of iterations to measure on a specific power limit. This is passed to the [`ZeusDataLoader`][zeus.run.ZeusDataLoader]. """ # Knob optimizers. self.bso = batch_size_optimizer # Check if monitor_path is absolute. # This is needed since we may change the cwd based on the job's workdir. if not Path(monitor_path).is_absolute(): raise ValueError("monitor_path must be specified as an absolute path.") # Log base directory. # Needs to be absolute because the training job script may have a different # current working directory (when job.workdir is not None). if not Path(log_base).is_absolute(): raise ValueError("log_base must be specified as an absolute path.") os.makedirs(log_base, exist_ok=True) self.log_base = log_base # Save arguments. self.seed = seed self.monitor_path = monitor_path self.observer_mode = observer_mode self.profile_warmup_iters = profile_warmup_iters self.profile_measure_iters = profile_measure_iters # Query the max power limit of the GPU. 
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        minmax = pynvml.nvmlDeviceGetPowerManagementLimitConstraints(handle)  # unit: mW
        self.max_pl = minmax[1] // 1000  # unit: W
        print(
            f"[Zeus Master] Max power limit of {pynvml.nvmlDeviceGetName(handle)}: {self.max_pl}W"
        )
        pynvml.nvmlShutdown()

    def build_logdir(
        self,
        job: Job,
        num_recurrence: int,
        eta_knob: float,
        beta_knob: float,
        exist_ok: bool = True,
    ) -> str:
        r"""Build the `ZEUS_LOG_DIR` string and create the directory.

        Args:
            job: Job to run.
            num_recurrence: The total number of recurrences.
            eta_knob: $\eta$ used in the cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$
            beta_knob: `beta_knob * min_cost` is the early stopping cost threshold.
                Set to `np.inf` to disable early stopping.
            exist_ok: Passed to `os.makedirs`. If `False`, will err if the directory
                already exists.
        """
        now = strftime("%Y%m%d%H%M%S", localtime())
        logdir = (
            job.to_logdir() + f"+x{num_recurrence}+eta{eta_knob}+beta{beta_knob}+{now}"
        )
        logdir = f"{self.log_base}/{logdir}"
        os.makedirs(logdir, exist_ok=exist_ok)
        return logdir

    def run_job(
        self,
        job: Job,
        batch_size: int,
        learning_rate: float,
        seed: int,
        logdir: str,
        rec_i: int,
        tries: int,
        eta_knob: float,
        cost_ub: float,
    ) -> tuple[float, float, bool]:
        r"""Launch the training job.

        Args:
            job: The job to run.
            batch_size: The batch size to use.
            learning_rate: The learning rate to use, scaled based on `batch_size`.
            seed: The random seed to use for training.
            logdir: Directory to store log files in.
            rec_i: Recurrence number of this run of the job.
            tries: Retry number of this recurrence of the job.
            eta_knob: $\eta$ used in the cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$
            cost_ub: Cost upper bound. The job is terminated when the next epoch is going
                to exceed the cost upper bound.

        Returns:
            A tuple of energy consumption, time consumption, and whether the job
            reached the target metric.
        """
        # Generate job command
        command = job.gen_command(batch_size, learning_rate, seed, rec_i)

        # Set environment variables
        job_id = f"rec{rec_i:02d}+try{tries:02d}"
        zeus_env = dict(
            ZEUS_LOG_DIR=logdir,
            ZEUS_JOB_ID=job_id,
            ZEUS_COST_THRESH="inf" if cost_ub == np.inf else str(cost_ub),
            ZEUS_ETA_KNOB=str(eta_knob),
            ZEUS_TARGET_METRIC=str(job.target_metric),
            ZEUS_MONITOR_PATH=self.monitor_path,
            ZEUS_PROFILE_PARAMS=f"{self.profile_warmup_iters},{self.profile_measure_iters}",
            ZEUS_USE_OPTIMAL_PL=str(not self.observer_mode),
        )
        env = deepcopy(os.environ)
        env.update(zeus_env)

        # Training script output captured by the master.
        job_output = f"{logdir}/{job_id}.train.log"

        # Training stats (energy, time, reached, end_epoch) written by ZeusDataLoader.
        # This file being found means that the training job is done.
        train_json = Path(f"{logdir}/{job_id}+bs{batch_size}.train.json")

        # Reporting
        print(f"[run job] Launching job with BS {batch_size}:")
        print(f"[run job] {zeus_env=}")
        if job.workdir is not None:
            print(f"[run job] cwd={job.workdir}")
        print(f"[run job] {command=}")
        print(f"[run job] {cost_ub=}")
        print(f"[run job] Job output logged to '{job_output}'")

        # Run the job.
        with open(job_output, "w") as f:
            # Launch subprocess.
            # stderr is redirected to stdout, and stdout to the job_output file.
            proc = subprocess.Popen(
                command,
                cwd=job.workdir,
                stderr=subprocess.STDOUT,
                stdout=f,
            )

            # Check if training is done.
            with open(job_output, "r") as jobf:
                while proc.poll() is None:
                    print(jobf.read(), end="")
                    sleep(1.0)

                # Print out the rest of the script output.
                f.flush()
                print(jobf.read())

        # Report exitcode.
        exitcode = proc.poll()
        print(f"[run job] Job terminated with exit code {exitcode}.")

        # `train_json` must exist at this point.
        if not train_json.exists():
            raise RuntimeError(f"{train_json} does not exist.")

        # Read `train_json` for the training stats.
        with open(train_json, "r") as f:
            stats = json.load(f)
            print(f"[run job] {stats=}")

        # Casting
        if not isinstance(stats["reached"], bool):
            stats["reached"] = stats["reached"].lower() == "true"

        return float(stats["energy"]), float(stats["time"]), stats["reached"]

    def run(
        self,
        job: Job,
        num_recurrence: int,
        batch_sizes: list[int],
        beta_knob: float,
        eta_knob: float,
    ) -> list[HistoryEntry]:
        r"""Run a job that sequentially recurs without overlap.

        Args:
            job: The job to run.
            num_recurrence: How many times the job recurs.
            batch_sizes: List of feasible batch sizes.
            beta_knob: `beta_knob * min_cost` is the early stopping cost threshold.
                Set to `np.inf` to disable early stopping.
            eta_knob: $\eta$ used in the cost metric.
                $\textrm{cost} = \eta \cdot \textrm{ETA} + (1 - \eta) \cdot \textrm{MaxPower} \cdot \textrm{TTA}$

        Returns:
            A list of [`HistoryEntry`][zeus.analyze.HistoryEntry] objects for each job run.
        """
        # Sanity checks
        if job.default_bs is None:
            raise ValueError("You must provide a default batch size for the job.")
        if job.command is None:
            raise ValueError("You must provide a command format string for the job.")
        if eta_knob < 0.0 or eta_knob > 1.0:
            raise ValueError("eta_knob must be in [0.0, 1.0].")

        print(f"[Zeus Master] {job} x {num_recurrence}")
        print(f"[Zeus Master] Batch sizes: {batch_sizes}")

        # Copy all internal state so that simulation does not modify any
        # internal state and is deterministic w.r.t. the random seed.
        bso = deepcopy(self.bso)
        seed = self.seed

        # ZEUS_LOG_DIR: Where all the logs and files are stored for this run.
        logdir = self.build_logdir(job, num_recurrence, eta_knob, beta_knob)

        # Job history list to return.
        history: list[HistoryEntry] = []

        # Save job history to this file, continuously.
        history_file = f"{logdir}/history.py"

        # beta_knob * min_cost is the early stopping cost threshold.
        min_cost = np.inf

        # Register the job in the BSO.
        bso.register_job(job, batch_sizes)

        # Job recurs.
        for rec_i in range(1, num_recurrence + 1):
            print(f"\n[Zeus Master] Recurrence: {rec_i}")

            # The retrying loop. Retry until convergence.
            cost_acc = 0.0
            for tries in range(1, 21):
                # Fetch the knobs to use.
                bs = bso.predict(job)

                # Scale the learning rate.
                lr = job.scale_lr(bs)

                # Launch the job.
                # Power profiling and optimization is done entirely by the ZeusDataLoader.
                # Early stops based on cost_ub.
                energy, time, reached = self.run_job(
                    job=job,
                    batch_size=bs,
                    learning_rate=lr,
                    seed=seed,
                    logdir=logdir,
                    rec_i=rec_i,
                    tries=tries,
                    eta_knob=eta_knob,
                    cost_ub=beta_knob * min_cost,
                )

                # The random seed will be unique for each run, but still jobs will be
                # deterministic w.r.t. each call to `run`.
                seed += 1

                # Compute the cost of this try.
                num_gpus = torch.cuda.device_count()
                cost = zeus_cost(energy, time, eta_knob, self.max_pl * num_gpus)
                print(f"[Zeus Master] {cost=}")

                # Accumulate the cost to track the total cost of this recurrence.
                cost_acc += cost

                # Provide feedback to the BSO.
                bso.observe(job, bs, cost, reached)

                # Record history for visualization.
                history.append(HistoryEntry(bs, None, energy, reached, time))
                with open(history_file, "w") as f:
                    # Intended use:
                    #
                    # ```python
                    # from zeus.analyze import HistoryEntry
                    # history = eval(open(history_file).read())
                    # ```
                    f.write(pprint.pformat(history) + "\n")

                # Reached the target metric. Go to next recurrence.
                if reached:
                    print(
                        "\n[Zeus Master] Reached target metric in "
                        f"{tries} {'try' if tries == 1 else 'tries'}."
                    )
                    # Track the minimum cost.
                    if min_cost > cost_acc:
                        print(
                            f"\n[Zeus Master] Minimum cost updated from {min_cost} to {cost_acc}."
                        )
                        min_cost = cost_acc
                    break
                # Didn't reach the target metric.
                # We assume that the default BS (set by the user) will converge.
                if rec_i == 1:
                    raise RuntimeError(
                        f"The default batch size {job.default_bs} did not converge."
                    )
            else:
                print(
                    "\n[Zeus Master] Job did not reach the target metric in 20 trials!"
                )
                raise RuntimeError("Unreachable target metric.")

        print(f"[Zeus Master]\n{history}")
        return history
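A minimal driver sketch for the class above. The `job` object and the concrete `BatchSizeOptimizer` (`bso`) are assumed to be constructed elsewhere by the user; only the `ZeusMaster` calls come from this file.

master = ZeusMaster(
    batch_size_optimizer=bso,                  # any BatchSizeOptimizer implementation
    log_base="/abs/path/to/zeus_logs",         # must be an absolute path
    monitor_path="/abs/path/to/zeus_monitor",  # must be an absolute path
)
history = master.run(
    job=job,
    num_recurrence=10,
    batch_sizes=[32, 64, 128, 256],
    beta_knob=2.0,  # early-stop a retry when cost exceeds 2x the best cost so far
    eta_knob=0.5,   # equal weight on energy (ETA) and time (TTA)
)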
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/run/master.py
master.py
from __future__ import annotations
    for epoch_number in train_loader.epochs():
        for batch in train_loader:
            # Learn from batch
        for batch in eval_loader:
            # Evaluate on batch

        # Make sure you all-reduce the validation metric across all GPUs,
        # since Zeus expects the final validation metric.
        val_metric_tensor = torch.tensor([validation_metric], device="cuda")
        dist.all_reduce(val_metric_tensor, async_op=False)
        train_loader.report_metric(val_metric_tensor.item())
    ```

    # Environment variables

    `ZeusDataLoader` interfaces with the outside world via environment variables.
    Thus, while `ZeusDataLoader` is paired together with
    [`ZeusMaster`][zeus.run.ZeusMaster] in example scripts, any other "driver"
    can use `ZeusDataLoader` as long as it sets appropriate environment variables.

    - `ZEUS_TARGET_METRIC` : Required. Zeus will stop training when this target
                             validation metric is reached. Will be cast to float.
    - `ZEUS_LOG_DIR`       : Directory to store profiling logs. (Default: `"zeus_log"`)
    - `ZEUS_JOB_ID`        : String to prefix in logs. (Default: `"zeus"`)
    - `ZEUS_COST_THRESH`   : Stop training when the energy-time cost will exceed
                             this threshold. (Default: `"inf"`)
    - `ZEUS_ETA_KNOB`      : $\eta$ knob to trade off between energy and time.
                             Larger values reduce more energy and sacrifice time.
                             (Default: `"0.5"`)
    - `ZEUS_MONITOR_PATH`  : Path to the Zeus power monitor binary.
                             (Default: `"zeus_monitor"`)
    - `ZEUS_PROFILE_PARAMS`: Warmup and measure iterations for each power limit,
                             separated by a comma. (Default: `"10,40"`)
    - `ZEUS_USE_OPTIMAL_PL`: Whether to actually use the optimal power limit found.
                             Setting this to false is the Observer Mode described
                             in Section 5. (Default: `"True"`)
    """

    # The power limit currently set for the GPU.
    current_gpu_pl: ClassVar[int] = 0

    # Train batch size to be accessed by the eval dataloader.
    train_batch_size: ClassVar[int] = 0

    # Length of the eval dataloader. `epochs` in the train dataloader needs this.
    eval_num_samples: ClassVar[int] = 0

    # Train-time power profiling result. Maps power limit to avg_power & throughput.
    train_power_result: ClassVar[dict[int, float]] = {}
    train_tput_result: ClassVar[dict[int, float]] = {}

    # Eval-time power profiling result. Maps power limit to avg_power & throughput.
    eval_power_result: ClassVar[dict[int, float]] = {}
    eval_tput_result: ClassVar[dict[int, float]] = {}

    # Cost-optimal power limit. Set by the train dataloader after the last power limit
    # was explored.
    optimal_pl: ClassVar[int] = 0

    # Train epoch measurements for time/energy accounting.
    train_epoch_time: ClassVar[list[float]] = []
    # The master process will record ALL GPUs' energy consumption during training.
    # GPU_i's energy record is `train_epoch_energy[i]`.
    train_epoch_energy: ClassVar[np.ndarray] = np.empty(0)

    # Eval-time latency profiling result. Maps power limit to epoch latency.
    eval_epoch_time: ClassVar[list[float]] = []
    # The master process will record ALL GPUs' energy consumption during evaluation.
    # GPU_i's energy record is `eval_epoch_energy[i]`.
    eval_epoch_energy: ClassVar[np.ndarray] = np.empty(0)

    # Zeus monitor instance
    zeus_monitor: ClassVar[ZeusMonitor | None] = None

    # ruff: noqa: PLR0912, PLR0915
    def __init__(
        self,
        *args,
        batch_size: int,
        max_epochs: int = -1,
        distributed: Literal["dp"] | None = None,
        **kwargs,
    ) -> None:
        """Initialize the dataloader.

        Args:
            batch_size: Batch size to use for training.
            max_epochs: Maximum number of epochs to train. **Specify this parameter
                only to the train data loader.** (Default: `-1`)
            distributed: Distributed strategy to use for training.
                If training with a single GPU, this value should be `None`; if
                training using data parallel with multi-GPU on a single node, this
                value should be `"dp"`. (Default: `None`)
            *args: Arguments to pass to `torch.utils.data.DataLoader`.
            **kwargs: Keyword arguments to pass to `torch.utils.data.DataLoader`.

        Raises:
            ValueError: `max_epochs` is specified when initializing the evaluation
                dataloader.
            RuntimeError: `torch.distributed` package is not available.
            RuntimeError: The default process group is not initialized. Make sure to
                call `torch.distributed.init_process_group` to initialize the default
                process group before doing a multiprocessing distributed training.
            ValueError: `self.sampler` is not an instance of `DistributedSampler`.
                An instance of `DistributedSampler` will shuffle and distribute data
                among GPUs, so it is required for data parallel training.
            ValueError: `DistributedSampler` passed in `self.sampler` is inconsistent
                with the default process group. Currently, we assume that all the GPUs
                in the node will be used for training. In this case, the instance of
                `DistributedSampler` should have
                `sampler.num_replicas == torch.distributed.get_world_size()` and
                `sampler.rank == torch.distributed.get_rank()`.
            TypeError: Parameter `distributed` is not correctly specified. Currently,
                it can only be set as `"dp"` or `None`.
            RuntimeError: Scaling is triggered when the profile window exceeds the
                number of iterations in one epoch, but the latter is too small, so
                scaling cannot produce a valid profile window. Please consider using
                a smaller batch size.
        """
        # Save attributes.
        self.batch_size = batch_size
        self.split = "train" if max_epochs != -1 else "eval"
        self.max_epochs = max_epochs
        self.log_prefix = f"[ZeusDataLoader({self.split})]"
        self.logger = get_logger(self.log_prefix)

        # Initialize the DataLoader.
        super().__init__(*args, batch_size=batch_size, **kwargs)

        # World size and rank for distributed training.
        # Set default value for single-GPU.
        self.world_size = 1
        self.rank = 0

        # Check whether we are doing a distributed training.
        # Pass in world size and rank.
        self.distributed = distributed
        if self.distributed == "dp":
            # Check if the distributed package is available.
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available.")
            # Check if the process group is initialized.
            if not dist.is_initialized():
                raise RuntimeError(
                    "Default process group has not been initialized,"
                    " please make sure to call `init_process_group`"
                    " before you instantiate `ZeusDataLoader`."
                )
            # Check if `self.sampler` is an instance of DistributedSampler.
            if not isinstance(getattr(self, "sampler", None), DistributedSampler):
                raise ValueError(
                    "Sampler is not an instance of `DistributedSampler`."
                    " Data parallel training on multi-GPU requires a `DistributedSampler`."
                )
            # Check the consistency between the sampler and process group.
            if (
                self.sampler.num_replicas != dist.get_world_size()
                or self.sampler.rank != dist.get_rank()
            ):
                raise ValueError(
                    "`DistributedSampler` is inconsistent with the default process group."
                    f" The default process group has `world_size={dist.get_world_size()}`,"
                    f" `rank={dist.get_rank()}`."
                )
            self.world_size = dist.get_world_size()
            self.rank = dist.get_rank()
        elif self.distributed is not None:
            raise ValueError('`distributed` currently only accepts `"dp"` or `None`.')

        if self._is_train:
            self._log(
                f"Distributed data parallel: {'ON' if self.world_size > 1 else 'OFF'}"
            )

        if self._is_train:
            if ZeusDataLoader.train_batch_size != 0:
                # If max_epochs is specified when initializing an eval dataloader,
                # it will mistake itself for a train dataloader.
                # In this case, raise a ValueError.
                raise ValueError("Specify max_epochs only to the train dataloader.")
            # In data parallel training, each DataLoader gets `batch_size=global_batch_size/num_gpus`.
            # So, we scale the `train_batch_size` for consistency with ZeusMaster.
            # NOTE: Zeus assumes `global_batch_size == batch_size * num_gpus`. So please
            # ensure that `global_batch_size` is divisible by `num_gpus` in the training script.
            ZeusDataLoader.train_batch_size = self.batch_size * self.world_size

        # Retrieve environment variables from ZeusMaster.
        self.target_metric = get_env("ZEUS_TARGET_METRIC", float)
        self.logdir = get_env("ZEUS_LOG_DIR", str, default="zeus_log")
        self.job_id = get_env("ZEUS_JOB_ID", str, default="zeus")
        self.cost_thresh = get_env("ZEUS_COST_THRESH", float, default=float("inf"))
        self.eta_knob = get_env("ZEUS_ETA_KNOB", float, default=0.5)
        self.monitor_path = get_env(
            "ZEUS_MONITOR_PATH",
            str,
            default="zeus_monitor",
        )
        self.warmup_iter, self.profile_iter = map(
            int, get_env("ZEUS_PROFILE_PARAMS", str, default="10,40").split(",")
        )
        self.use_optimal_pl = get_env("ZEUS_USE_OPTIMAL_PL", bool, default=True)

        # Create ZEUS_LOG_DIR if it does not exist.
        os.makedirs(self.logdir, exist_ok=True)

        # Whether the target metric was reached.
        self.target_metric_reached = False

        # Construct relevant paths.
        self.train_json = (
            f"{self.logdir}/{self.job_id}+bs{self.train_batch_size}.train.json"
        )
        self.power_json = f"{self.logdir}/bs{self.train_batch_size}.power.json"

        # Numbers related to the dataloader.
        # sample_num: the number of iterations processed in the current epoch.
        # num_samples: the total number of iterations in one epoch.
        self.epoch_num = 0
        self.sample_num = 0
        self.num_samples = len(self)

        # Pass the length of the eval dataloader for `epochs`.
        if not self._is_train:
            ZeusDataLoader.eval_num_samples = self.num_samples

        # If the number of iterations in one epoch (`num_samples`) is smaller than or equal
        # to one profile window (`warmup_iters + profile_iters`), we will not be able to
        # profile for any power limit. So, we scale the profile window to fit in one epoch.
        # We also avoid using the last batch of one epoch, because when `drop_last == True`,
        # the last batch will be smaller. This usually happens with large batch size on
        # small datasets, e.g., CIFAR100.
        if self._is_train and self.warmup_iter + self.profile_iter >= self.num_samples:
            self._log(
                f"The profile window takes {self.warmup_iter + self.profile_iter}"
                f" iterations ({self.warmup_iter} for warmup + {self.profile_iter}"
                f" for profile) and exceeds the number of iterations ({self.num_samples})"
                f" in one epoch. Scaling the profile window to fit in one epoch..."
            )
            scaling_factor = (self.num_samples - 1) / (
                self.warmup_iter + self.profile_iter
            )
            self.warmup_iter = int(self.warmup_iter * scaling_factor)
            self.profile_iter = int(self.profile_iter * scaling_factor)
            if self.warmup_iter == 0 or self.profile_iter == 0:
                raise RuntimeError(
                    f"Number of iterations in one epoch is {self.num_samples} and"
                    " is too small for applying the scaling. Please consider using"
                    " a smaller batch size. If you are running `run_zeus.py`, please"
                    " pass a smaller value to `--b_max`."
                )
            self._log(
                f"Scaling done! New profile window takes {self.warmup_iter + self.profile_iter}"
                f" iterations ({self.warmup_iter} for warmup + {self.profile_iter} for profile)."
            )

        # Power profiling windows
        #
        # We're interested in the average power and throughput here.
        #
        #   +----- warmup_start (change power limit)
        #   |        +----- prof_start (`_prof_window_push`)
        #   |        |                  +----- prof_end (`_prof_window_pop`)
        #   | warmup |      profile     |
        #   v  iter  v       iter       v
        # ================================= =====================
        # |      power limit = 250W       | | power limit = 225W ...
        # ================================= =====================
        #
        # =======================================================
        # |                    Epoch 1                        ...
        # =======================================================
        # ^
        # |
        # +------- Time/energy accounting for the entire training job (`_prof_window_push`)
        #
        # Initialize variables for profiling
        self.warmup_start_sample = 0
        self.prof_start_sample = 0
        self.prof_state = NOT_PROFILING
        self.prof_pl_index = 0

        # Initialize data structure for storing the energy accounting
        # based on the number of GPUs.
        if self._is_train:
            # Sanity check
            assert self.world_size > 0, f"{self.world_size=}"
            assert self.max_epochs > 0, f"{self.max_epochs=}"
            ZeusDataLoader.train_epoch_energy = np.zeros(
                shape=(self.world_size, self.max_epochs), dtype=np.float64
            )
            ZeusDataLoader.eval_epoch_energy = np.zeros(
                shape=(self.world_size, self.max_epochs), dtype=np.float64
            )

        # Initialize NVML and get the GPU handle for each GPU at the master process.
        self.gpu_handles = []
        pynvml.nvmlInit()
        for index in range(self.world_size):
            handle = pynvml.nvmlDeviceGetHandleByIndex(index)
            # Set persistent mode.
            # TODO(JW): Check SYS_ADMIN permissions and error with an explanation.
            pynvml.nvmlDeviceSetPersistenceMode(handle, pynvml.NVML_FEATURE_ENABLED)
            self.gpu_handles.append(handle)

        # Query NVML for the possible power limit range. Unit is mW.
        min_pl, self.max_pl = pynvml.nvmlDeviceGetPowerManagementLimitConstraints(
            self.gpu_handles[0]
        )
        self.power_limits = list(range(self.max_pl, min_pl - 25_000, -25_000))
        if self._is_train:
            self._log(f"Power limit range: {self.power_limits}")

        # Check whether profiling is ON or OFF. If OFF, load the power limit
        # from power_json, and set the power limit for all GPUs at the master process.
        if self._is_train and self.rank == 0:
            should_profile = self._should_profile
            self._log(f"Power profiling: {'ON' if should_profile else 'OFF'}")
            # Initialize profiling service
            ZeusDataLoader.zeus_monitor = ZeusMonitor(
                list(range(self.world_size)),
                self.monitor_path,
            )
            # If we need to do profiling, no need to touch the power limit.
            # If profiling is already done, load profile information from power_json.
            # Only then do we have the optimal PL available.
            # We only load in the train dataloader since it populates classvars.
            if not should_profile:
                self._load_power_results()
                self._set_gpu_steady_power_limit()

        # Make sure NVML is shut down when the training script exits.
        if self._is_train:
            atexit.register(pynvml.nvmlShutdown)

    def epochs(self) -> Generator[int, None, None]:
        """Yield the current epoch number from 0 until when training should stop.

        Training should stop when

        - the cost reached the cost threshold, or
        - the maximum number of epochs was reached, or
        - the target metric was reached.

        When done, stores the job results in `train_json`.

        Yields:
            Epoch indices starting from zero.

        Raises:
            ZeusCostThresholdExceededError: the predicted cost after the next
                epoch exceeds the cost threshold.
                When doing data parallel training, this exception is used for
                terminating all the processes.
        """
        # Sanity check.
        if not self._is_train:
            raise RuntimeError("Use epochs() on the train dataloader.")

        while True:
            # Variables for storing time/energy consumption & cost
            time_consumed, energy_consumed = -1, -1
            cost = -1
            if self.rank == 0:
                # Sanity checks.
                enum = self.epoch_num
                assert (
                    len(self.train_epoch_time) == enum
                ), f"{len(self.train_epoch_time)=}"
                assert (
                    len(self.eval_epoch_time) == enum
                ), f"{len(self.eval_epoch_time)=}"

                # Compute time and energy consumption up to now.
                # Compute time consumption at GPU_0
                time_consumed = sum(self.train_epoch_time + self.eval_epoch_time)
                # Compute energy consumption over all the GPUs
                energy_consumed = (
                    self.train_epoch_energy.sum() + self.eval_epoch_energy.sum()
                )
                cost = zeus_cost(
                    energy_consumed,
                    time_consumed,
                    self.eta_knob,
                    self.max_pl // 1000 * self.world_size,
                )
                self._log(
                    f"Up to epoch {self.epoch_num}: "
                    f"time={time_consumed:.2f}, energy={energy_consumed:.2f}, cost={cost:.2f}"
                )

            # target_metric_reached is set when the current validation metric is reported to
            # the train dataloader after the end of each epoch.
            # Stop if the target metric was reached.
            if self.target_metric_reached:
                if self.rank == 0:
                    # Sanity check that time/energy consumption & cost are valid in master process.
                    assert time_consumed >= 0 and energy_consumed >= 0 and cost >= 0
                    self._log(
                        f"Target metric {self.target_metric} was reached! Stopping."
                    )
                    self._save_train_results(energy_consumed, time_consumed, cost, True)
                return

            # Max epoch is a hard stop.
            if self.epoch_num >= self.max_epochs:
                if self.rank == 0:
                    # Sanity check that time/energy consumption & cost are valid in master process.
                    assert time_consumed >= 0 and energy_consumed >= 0 and cost >= 0
                    self._log(
                        f"Maximum number of epochs {self.max_epochs} reached. Stopping."
                    )
                    self._save_train_results(
                        energy_consumed, time_consumed, cost, False
                    )
                return

            # No need to do anything in the first epoch.
            if self.epoch_num == 0:
                yield 0
                continue

            # Just continue if we're profiling.
            # This will ignore and continue training even if the cost threshold was exceeded.
            # However, the profiling cost actually exceeding the cost threshold would not
            # happen frequently. It's more like a wrong cost threshold.
            if self._should_profile:
                if cost >= self.cost_thresh:
                    self._log(
                        f"{cost=:.2f} exceeded threshold {self.cost_thresh:.2f} at GPU_{self.rank}, "
                        "but just continue since we're profiling."
                    )
                yield self.epoch_num
                continue

            if self.rank == 0:
                # Sanity check that time/energy consumption & cost are valid in master process.
                assert time_consumed >= 0 and energy_consumed >= 0 and cost >= 0

                # We want to predict whether running the next epoch will exceed the cost threshold.
                next_train_time = (
                    self.num_samples / self.train_tput_result[self.optimal_pl]
                )
                next_eval_time = (
                    self.eval_num_samples / self.eval_tput_result[self.optimal_pl]
                )
                next_time = next_train_time + next_eval_time
                next_train_energy = (
                    next_train_time * self.train_power_result[self.optimal_pl]
                )
                next_eval_energy = (
                    next_eval_time * self.eval_power_result[self.optimal_pl]
                )
                next_energy = next_train_energy + next_eval_energy
                self._log(
                    f"Optimal PL train & eval expected time={next_time:.2f} energy={next_energy:.2f}"
                )
                next_time_consumed = time_consumed + next_time
                next_energy_consumed = energy_consumed + next_energy
                next_cost = zeus_cost(
                    next_energy_consumed,
                    next_time_consumed,
                    self.eta_knob,
                    self.max_pl // 1000 * self.world_size,
                )
                self._log(
                    f"Expected next epoch: time={next_time_consumed:.2f}, "
                    f"energy={next_energy_consumed:.2f}, "
                    f"cost={next_cost:.2f}"
                )

                # Stop if the predicted cost of the next epoch exceeds the cost threshold.
                if next_cost >= self.cost_thresh:
                    # Save training results
                    self._save_train_results(
                        energy_consumed, time_consumed, cost, False
                    )
                    # NOTE: We use a customized exception to terminate ALL the processes for
                    # the purpose of multiprocessing management.
                    # When doing data parallel training on multiple processes, ONLY the master
                    # process will predict `next_cost` and do the threshold checking. However,
                    # once the predicted cost exceeds the threshold, we want to terminate ALL
                    # the processes. Currently this is achieved by throwing an exception at the
                    # master process. The launching script will terminate all the processes
                    # that are still alive.
                    raise ZeusCostThresholdExceededError(
                        time_consumed,
                        energy_consumed,
                        cost,
                        next_cost,
                        self.cost_thresh,
                    )

            yield self.epoch_num

    def report_metric(self, metric: float, higher_is_better: bool) -> None:
        """Report the validation metric to the train dataloader.

        If doing data parallel training, please make sure to call `dist.all_reduce()`
        to reduce the validation metric across all GPUs before calling
        `train_loader.report_metric()`.

        Args:
            metric: The validation metric of the current epoch.
            higher_is_better: For example, this should be `True` for accuracy
                and `False` for error.
        """
        assert self._is_train, "Use report_metric on the train dataloader."
        # ruff: noqa: PLR5501
        if higher_is_better:
            if metric >= self.target_metric:
                self.target_metric_reached = True
        else:
            if metric <= self.target_metric:
                self.target_metric_reached = True

    @property
    def _should_profile(self) -> bool:
        """Whether profiling is not done."""
        return not Path(self.power_json).exists()

    @property
    def _power_limits_left(self) -> bool:
        """Whether there are power limits left to profile."""
        return self.prof_pl_index < len(self.power_limits)

    def _compute_optimal_pl(self) -> int:
        """Return the cost-optimal power limit."""
        # Sanity checks.
        assert ZeusDataLoader.train_tput_result
        assert ZeusDataLoader.train_power_result
        # Only compute the optimal PL at the master process.
        assert self.rank == 0

        # Compute power cost
        tput = ZeusDataLoader.train_tput_result
        power = ZeusDataLoader.train_power_result
        cost_map = {
            pl: (
                self.eta_knob * power[pl]
                + (1 - self.eta_knob) * self.max_pl * self.world_size
            )
            / tput[pl]
            for pl in self.power_limits
        }
        optimal_pl = min(cost_map.keys(), key=cost_map.get)  # type: ignore
        self._log(f"Cost-optimal power limit is {optimal_pl//1000}W")
        return optimal_pl

    def _set_gpu_power_limit(self, power_limit: int) -> None:
        """Set the GPU's power limit using NVML.

        This method only invokes NVML when `power_limit` is not the same as
        the current GPU power limit.
        Args:
            power_limit: Power limit to set.
        """
        # Sanity check.
        # Only set the power limit at the master process.
        assert self.rank == 0
        assert len(self.gpu_handles) == self.world_size

        # Set the power limit for all GPUs.
        if self.current_gpu_pl != power_limit:
            for index in range(self.world_size):
                pynvml.nvmlDeviceSetPowerManagementLimit(
                    self.gpu_handles[index], power_limit
                )
                self._log(f"[GPU_{index}] Set GPU power limit to {power_limit//1000}W.")
            ZeusDataLoader.current_gpu_pl = power_limit

    def _set_gpu_steady_power_limit(self) -> None:
        """Set the steady power limit based on self.use_optimal_pl."""
        # Sanity check.
        # Only set the power limit at the master process.
        assert self.rank == 0

        power_limit = ZeusDataLoader.optimal_pl if self.use_optimal_pl else self.max_pl
        self._log(
            "Steady state power limit: "
            f"{'OPT' if self.use_optimal_pl else 'MAX'} {power_limit//1000}W"
        )
        self._set_gpu_power_limit(power_limit)

    def _log(
        self, message: str, level: int = logging.INFO, master_only: bool = True
    ) -> None:
        """Print out message with prefix.

        Args:
            message: The message to log out.
            level: The logging level to use. (Default: `logging.INFO`)
            master_only: Whether to log only at the master process. Usually set to
                True for global logging and False for GPU-specific logging. If set
                to False, a prefix indicating which GPU this log comes from will be
                included as well. (Default: `True`)
        """
        if master_only:
            if self.rank == 0:
                self.logger.log(level, "%s", message)
        else:
            gpu_log_prefix = f"[GPU_{self.rank}]"
            self.logger.log(level, "%s %s", gpu_log_prefix, message)

    @cached_property
    def _is_train(self) -> bool:
        """Return whether this dataloader is for training."""
        return self.split == "train"

    @property
    def _monitor_log_prefix(self) -> str:
        """Build the prefix for the power monitor log file."""
        return f"bs{self.train_batch_size}+e{self.epoch_num}"

    @property
    def _monitor(self) -> ZeusMonitor:
        """Return the `ZeusMonitor` instance."""
        assert (
            ZeusDataLoader.zeus_monitor is not None
        ), "ZeusDataLoader.zeus_monitor was not instantiated"
        return ZeusDataLoader.zeus_monitor

    def _begin_measurement(self, name: str) -> None:
        """A wrapper function that starts a measurement window."""
        assert self.rank == 0
        self._monitor.begin_window(name, sync_cuda=True)

    def _end_measurement(self, name: str) -> Measurement:
        """A wrapper function that ends a measurement window and returns measurements."""
        assert self.rank == 0
        return self._monitor.end_window(name, sync_cuda=True)

    def _start_warmup(self) -> None:
        """Let the GPU run for some time with the power limit to profile."""
        # Sanity checks.
        assert self._should_profile, f"start_warmup: {self._should_profile=}"
        assert self._is_train, f"start_warmup: {self._is_train=}"
        assert self._power_limits_left, f"start_warmup: {self._power_limits_left=}"
        # Sanity check that this profile window ends before the end of the current epoch.
        assert (
            self.sample_num + self.warmup_iter + self.profile_iter < self.num_samples
        ), (
            "start_warmup: "
            f"end_of_this_profile_window {self.sample_num + self.warmup_iter + self.profile_iter} "
            f"< end_of_this_epoch {self.num_samples}"
        )

        # Call cudaSynchronize to make sure this is the iteration boundary.
        torch.cuda.synchronize()

        # Change power limit.
        if self.rank == 0:
            power_limit = self.power_limits[self.prof_pl_index]
            self._set_gpu_power_limit(power_limit)

            self._log(f"Warm-up started with power limit {self.current_gpu_pl//1000}W")

        self.warmup_start_sample = self.sample_num
        # Set profiling state.
        self.prof_state = WARMING_UP

    def _start_prof(self) -> None:
        """Start profiling power consumption for the current power limit."""
        # Sanity checks.
        assert self._should_profile, f"start_prof: {self._should_profile=}"
        assert self._is_train, f"start_prof: {self._is_train=}"
        assert self._power_limits_left, f"start_prof: {self._power_limits_left=}"
        # Sanity check that this profile window ends before the end of the current epoch.
        assert self.sample_num + self.profile_iter < self.num_samples, (
            "start_prof: "
            f"end_of_this_profile_window {self.sample_num + self.profile_iter} "
            f"< end_of_this_epoch {self.num_samples}"
        )

        if self.rank == 0:
            # Push profiling window for the current power limit value.
            # This window will profile for `self.profile_iter` iterations.
            self._begin_measurement(
                f"__ZeusDataLoader_power_limit_{self.current_gpu_pl//1000}"
            )

        # Set the sample number when we started profiling.
        self.prof_start_sample = self.sample_num

        # Set profiling state.
        self.prof_state = PROFILING

        self._log(f"Profile started with power limit {self.current_gpu_pl//1000}W")

    def _end_prof(self) -> None:
        """End profiling power consumption for this power limit.

        Raises:
            ValueError: ValueError raised by sklearn.metrics.auc in analyze.avg_power,
                possibly because the profile window is too small. In this case, the
                user should consider increasing the profile window.
        """
        # Sanity checks.
        assert self._should_profile, f"end_prof: {self._should_profile=}"
        assert self._is_train, f"end_prof: {self._is_train=}"
        assert self._power_limits_left, f"end_prof: {self._power_limits_left=}"
        # Sanity check that this profile window ends before the end of the current epoch.
        assert self.sample_num < self.num_samples, (
            "end_prof: "
            f"end_of_this_profile_window {self.sample_num} "
            f"< end_of_this_epoch {self.num_samples}"
        )

        # Set profiling state.
        self.prof_state = NOT_PROFILING

        # Call cudaSynchronize to make sure this is the iteration boundary.
        torch.cuda.synchronize()

        # Advance to the next power limit. Affects self.power_limits_left.
        self.prof_pl_index += 1

        if self.rank == 0:
            # Pop profiling window for the current power limit and fetch profiling results.
            profiling_result = self._end_measurement(
                f"__ZeusDataLoader_power_limit_{self.current_gpu_pl//1000}"
            )
            time_consumed, energy_consumed = (
                profiling_result.time,
                profiling_result.energy,
            )
            # Summing up the average power on all GPUs.
            sum_avg_power = sum(energy_consumed.values()) / time_consumed
            self.train_power_result[self.current_gpu_pl] = sum_avg_power

            # Compute and save throughput. We use the time at the master process.
            samples_processed = self.sample_num - self.prof_start_sample
            throughput = samples_processed / time_consumed
            self.train_tput_result[self.current_gpu_pl] = throughput

            self._log(f"Profile done with power limit {self.current_gpu_pl//1000}W")

            # If we're done with all power limits, compute the optimal power limit
            # and change to that power limit for the rest of the epoch.
            # This will lead to the eval epoch being run with the optimal power limit,
            # and since self.should_profile is still True, tput/power will be profiled.
            # Profiling the optimal power limit on the eval set will help us better
            # predict the time and energy consumed in the next eval epoch, to help us
            # decide whether running the next epoch will exceed the cost threshold.
            if not self._power_limits_left:
                self._log("This was the last power limit to explore.")
                ZeusDataLoader.optimal_pl = self._compute_optimal_pl()
                self._set_gpu_power_limit(ZeusDataLoader.optimal_pl)

    def _save_power_results(self) -> None:
        """Write the power profiling results to `power_json`."""
        # Sanity check.
        # Only save power results at the master process.
        assert self.rank == 0

        prof_result = dict(
            job_id=self.job_id,  # Not used. Just for the purpose of record.
            train_power=self.train_power_result,
            train_throughput=self.train_tput_result,
            eval_power=self.eval_power_result,
            eval_throughput=self.eval_tput_result,
            optimal_pl=self.optimal_pl,
        )

        # NOTE: Write-then-move needed if we're handling concurrent jobs.
        with open(self.power_json, "w") as f:
            json.dump(prof_result, f)
        with open(self.power_json, "r") as f:
            self._log("Power profiling done.")
            self._log(f"Saved {self.power_json}: {f.read()}")

    def _load_power_results(self) -> None:
        """Load power profiling information into the class from `power_json`."""
        # Sanity check.
        # Only load power results at the master process.
        assert self.rank == 0

        # Helper function that casts the keys of a dictionary to integer.
        def as_int_key(dictionary: dict[str, float]) -> dict[int, float]:
            result = {}
            for key, value in dictionary.items():
                result[int(key)] = value
            return result

        with open(self.power_json, "r") as f:
            power_results = json.load(f)

        ZeusDataLoader.train_power_result = as_int_key(power_results["train_power"])
        ZeusDataLoader.train_tput_result = as_int_key(power_results["train_throughput"])
        ZeusDataLoader.eval_power_result = as_int_key(power_results["eval_power"])
        ZeusDataLoader.eval_tput_result = as_int_key(power_results["eval_throughput"])
        ZeusDataLoader.optimal_pl = power_results["optimal_pl"]
        self._log(f"Loaded {self.power_json}: {power_results}")

    def _save_train_results(
        self, energy: float, time_: float, cost: float, reached: bool
    ) -> None:
        """Write the job training results to `train_json`."""
        # Sanity check.
        # Only save train results at the master process.
        assert self.rank == 0

        train_result = dict(
            energy=energy,
            time=time_,
            cost=cost,  # Not used. Just for reference.
            num_epochs=self.epoch_num,  # Not used. Just for reference.
            reached=reached,
        )
        with open(self.train_json, "w") as f:
            json.dump(train_result, f)
        with open(self.train_json, "r") as f:
            self._log("Training done.")
            self._log(f"Saved {self.train_json}: {f.read()}")

    def __iter__(self):
        """Signal the beginning of an epoch."""
        # Sanity check that there is no incomplete profile window at the beginning of
        # the epoch, because we start profiling only if the entire profiling window
        # can fit in the rest of the training epoch.
        assert self.prof_state == NOT_PROFILING, f"__iter__: {self.prof_state=}"

        # Update counters.
        self.epoch_num += 1
        self.sample_num = 0
        self._log(f"Epoch {self.epoch_num} begin.")

        # Cache the dataloader iterator.
        self.iter = super().__iter__()

        if self.rank == 0:
            # Push profiling window for the current epoch.
            # Note that both train and eval dataloaders will push one profiling window *separately*.
            self._begin_measurement("__ZeusDataLoader_epoch")

        # The power limit of the GPU is only changed by the train dataloader (`self._is_train`).
        # If we're not profiling, use the steady state power limit (`self._should_profile`).
        # If we are profiling, the power limit will be set in __next__ with warmup.
        # The power limit result is already loaded in when initializing the train dataloader,
        # so we just set the power limit directly.
        if self._is_train and not self._should_profile:
            self._set_gpu_steady_power_limit()

        return self

    def __next__(self):
        """Signal the beginning of an iteration."""
        # Update counters.
        self.sample_num += 1

        # Try to fetch next batch.
        try:
            data = next(self.iter)
        except StopIteration:
            # End of this epoch.
            # Sanity check that there is no incomplete profile window at the end of epoch.
            assert self.prof_state == NOT_PROFILING, f"__next__: {self.prof_state=}"

            # Make sure all GPU operations are done so that now is the *actual* end of this epoch.
            torch.cuda.synchronize()

            # Compute epoch time and energy consumption.
            # We're interested in the actual time/energy consumption here.
            #
            # ================================================================
            # |        Train                ||           Eval               |
            # ================================================================
            # ^                            ^ ^                             ^
            # |                           /  |                             |
            # _prof_window_push()  _prof_window_pop()                      |
            # for train loader     for train loader                        |
            #                               |                              |
            #                  _prof_window_push()              _prof_window_pop()
            #                  for eval loader                  for eval loader
            #
            if self.rank == 0:
                # Sanity check that `epoch_num` is within valid range
                assert self.epoch_num >= 1, f"__next__: {self.epoch_num=}"

                # Pop profiling window for the current epoch and fetch profiling result.
                profiling_result = self._end_measurement("__ZeusDataLoader_epoch")
                time_consumed, energy_consumed = (
                    profiling_result.time,
                    profiling_result.energy,
                )
                sum_energy_consumed = sum(energy_consumed.values())
                if self._is_train:
                    self.train_epoch_time.append(time_consumed)
                    # Record the energy consumption for each GPU.
                    for index in range(self.world_size):
                        self.train_epoch_energy[index][
                            self.epoch_num - 1
                        ] = energy_consumed[index]
                else:
                    # Integrate the last time_consumed seconds.
                    self.eval_epoch_time.append(time_consumed)
                    # Record the energy consumption for each GPU.
                    for index in range(self.world_size):
                        self.eval_epoch_energy[index][
                            self.epoch_num - 1
                        ] = energy_consumed[index]
                    # For the eval dataloader, we want to record the throughput and power
                    # for the current power limit. Since the train dataloader sets the power limit
                    # to the optimal power limit right after profiling is done, this will naturally
                    # record the tput/power of the optimal PL. From the following epochs where we
                    # don't profile anything, we directly use these values to compute the time and
                    # energy consumed.
                    if self._should_profile:
                        self.eval_tput_result[self.current_gpu_pl] = (
                            self.num_samples / time_consumed
                        )
                        self.eval_power_result[self.current_gpu_pl] = (
                            sum_energy_consumed / time_consumed
                        )
                        # The optimal PL being known means that all power limits have been explored.
                        # Let us end profiling by writing profile information to `power_json`.
                        if self.optimal_pl != 0:
                            self._save_power_results()
                self._log(
                    f"{self.split} epoch {self.epoch_num} done: "
                    f"time={time_consumed:.2f} energy={sum_energy_consumed:.2f}"
                )

            # Re-raise StopIteration.
            raise

        # We're in the middle of an epoch. The train loader has power limits left to profile.
        if self._is_train and self._should_profile and self._power_limits_left:
            # We weren't doing anything. Start warming up if the iterations left in
            # the current epoch can accommodate at least one profile window.
            if (
                self.prof_state == NOT_PROFILING
                and self.sample_num + self.warmup_iter + self.profile_iter
                < self.num_samples
            ):
                self._start_warmup()
            # We're done warming up. Start the actual profiling window.
            elif (
                self.prof_state == WARMING_UP
                and self.sample_num - self.warmup_start_sample == self.warmup_iter
            ):
                self._start_prof()
            # We're done profiling. Stop the profiling window and gather results.
            elif (
                self.prof_state == PROFILING
                and self.sample_num - self.prof_start_sample == self.profile_iter
            ):
                self._end_prof()

        return data
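A hedged sketch of driving `ZeusDataLoader` without `ZeusMaster` by setting the environment variables it reads (the values are illustrative; `train_set` and `eval_set` are assumed datasets):

import os

os.environ["ZEUS_TARGET_METRIC"] = "0.92"     # required
os.environ["ZEUS_LOG_DIR"] = "/tmp/zeus_log"  # optional, defaults to "zeus_log"

train_loader = ZeusDataLoader(train_set, batch_size=256, max_epochs=100)
eval_loader = ZeusDataLoader(eval_set, batch_size=256)

for _ in train_loader.epochs():
    for batch in train_loader:
        pass  # train on batch
    accuracy = 0.0
    for batch in eval_loader:
        pass  # evaluate on batch and update accuracy
    train_loader.report_metric(accuracy, higher_is_better=True)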
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/run/dataloader.py
dataloader.py
from __future__ import annotations

import atexit
import typing
import tempfile
from time import time, sleep
import multiprocessing as mp

import pynvml
import pandas as pd
from sklearn.metrics import auc

from zeus.util.logging import get_logger
from zeus.util.env import resolve_gpu_indices


def infer_counter_update_period(nvml_handles: list[pynvml.c_nvmlDevice_t]) -> float:
    """Infer the update period of the NVML power counter.

    NVML counters can update as slowly as 10 Hz depending on the GPU model, so
    there's no need to poll them faster than that. This function infers the
    update period for each unique GPU model and selects the fastest-updating
    period detected. Then, it returns half the period to ensure that the
    counter is polled at least twice per update period.
    """
    pynvml.nvmlInit()
    logger = get_logger(__name__)

    # For each unique GPU model, infer the update period and keep the smallest
    # (fastest-updating) one. Starting from infinity ensures the first detected
    # period is always taken.
    update_period = float("inf")
    gpu_models_covered = set()
    for handle in nvml_handles:
        if (model := pynvml.nvmlDeviceGetName(handle)) not in gpu_models_covered:
            logger.info(
                "Detected %s, inferring NVML power counter update period.", model
            )
            gpu_models_covered.add(model)
            detected_period = _infer_counter_update_period_single(handle)
            logger.info(
                "Counter update period for %s is %.2f s",
                model,
                detected_period,
            )
            if update_period > detected_period:
                update_period = detected_period

    pynvml.nvmlShutdown()

    # Target half the update period to ensure that the counter is polled
    # at least twice per update period.
    update_period /= 2.0

    # Anything less than ten times a second is probably too slow.
    if update_period > 0.1:
        logger.warning(
            "Inferred update period (%.2f s) is too long. Using 0.1 s instead.",
            update_period,
        )
        update_period = 0.1
    return update_period


def _infer_counter_update_period_single(nvml_handle: pynvml.c_nvmlDevice_t) -> float:
    """Infer the update period of the NVML power counter for a single GPU."""
    # Collect 1000 samples of the power counter with timestamps.
    time_power_samples: list[tuple[float, int]] = [(0.0, 0) for _ in range(1000)]
    for i in range(len(time_power_samples)):
        time_power_samples[i] = (
            time(),
            pynvml.nvmlDeviceGetPowerUsage(nvml_handle),
        )

    # Find the timestamps when the power readings changed.
    changed_times = []
    prev_power = time_power_samples[0][1]
    for t, p in time_power_samples:
        if p != prev_power:
            changed_times.append(t)
            prev_power = p

    # Compute the minimum time difference between power change timestamps.
    return min(time2 - time1 for time1, time2 in zip(changed_times, changed_times[1:]))


class PowerMonitor:
    """Monitor power usage from GPUs.

    This class acts as a lower level wrapper around a Python process that polls
    the power consumption of GPUs. This is primarily used by
    [`ZeusMonitor`][zeus.monitor.ZeusMonitor] for older architecture GPUs that
    do not support the nvmlDeviceGetTotalEnergyConsumption API.

    Attributes:
        gpu_indices (list[int]): Indices of the GPUs to monitor.
        update_period (float): Update period of the power monitor in seconds.
            Holds the inferred update period if `update_period` was given as `None`.
    """

    def __init__(
        self,
        gpu_indices: list[int] | None = None,
        update_period: float | None = None,
    ) -> None:
        """Initialize the power monitor.

        Args:
            gpu_indices: Indices of the GPUs to monitor. If None, monitor all GPUs.
            update_period: Update period of the power monitor in seconds. If None,
                infer the update period by max speed polling the power counter for
                each GPU model.
        """
        if gpu_indices is not None and not gpu_indices:
            raise ValueError("`gpu_indices` must be either `None` or non-empty")

        # Initialize NVML.
        pynvml.nvmlInit()

        # Set up logging.
self.logger = get_logger(type(self).__name__) # Get GPU indices and NVML handles. self.gpu_indices, nvml_gpu_indices = resolve_gpu_indices(gpu_indices) nvml_handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in nvml_gpu_indices] self.logger.info("Monitoring power usage of GPUs %s", self.gpu_indices) # Infer the update period if necessary. if update_period is None: update_period = infer_counter_update_period(nvml_handles) self.update_period = update_period # Create the CSV file for power measurements. power_csv = tempfile.mkstemp(suffix=".csv", text=True)[1] open(power_csv, "w").close() self.power_f = open(power_csv) self.power_df_columns = ["time"] + [f"power{i}" for i in self.gpu_indices] self.power_df = pd.DataFrame(columns=self.power_df_columns) # Spawn the power polling process. atexit.register(self._stop) self.process = mp.get_context("spawn").Process( target=_polling_process, args=(nvml_gpu_indices, power_csv, update_period) ) self.process.start() def _stop(self) -> None: """Stop monitoring power usage.""" pynvml.nvmlShutdown() if self.process is not None: self.process.terminate() self.process.join(timeout=1.0) self.process.kill() self.process = None def _update_df(self) -> None: """Add rows to the power dataframe from the CSV file.""" try: additional_df = typing.cast( pd.DataFrame, pd.read_csv(self.power_f, header=None, names=self.power_df_columns), # type: ignore ) except pd.errors.EmptyDataError: return self.power_df = pd.concat([self.power_df, additional_df], axis=0) def get_energy(self, start_time: float, end_time: float) -> dict[int, float] | None: """Get the energy used by the GPUs between two times. Args: start_time: Start time of the interval, from time.time(). end_time: End time of the interval, from time.time(). Returns: A dictionary mapping GPU indices to the energy used by the GPU between the two times. GPU indices are from the DL framework's perspective after applying `CUDA_VISIBLE_DEVICES`. If there are no power readings, return None. """ self._update_df() if self.power_df.empty: return None df = typing.cast( pd.DataFrame, self.power_df.query(f"{start_time} <= time <= {end_time}") ) try: return { i: float(auc(df["time"], df[f"power{i}"])) for i in self.gpu_indices } except ValueError: return None def get_power(self, time: float | None = None) -> dict[int, float] | None: """Get the power usage of the GPUs at a specific time point. Args: time: Time point to get the power usage at. If None, get the power usage at the last recorded time point. Returns: A dictionary mapping GPU indices to the power usage of the GPU at the specified time point. GPU indices are from the DL framework's perspective after applying `CUDA_VISIBLE_DEVICES`. If there are no power readings, return None. """ self._update_df() if self.power_df.empty: return None if time is None: row = self.power_df.iloc[-1] else: ind = self.power_df.time.searchsorted(time) try: row = self.power_df.iloc[ind] except IndexError: # This means that the time is after the last recorded power reading. row = self.power_df.iloc[-1] return {i: float(row[f"power{i}"]) for i in self.gpu_indices} def _polling_process( nvml_gpu_indices: list[int], power_csv: str, update_period: float, ) -> None: """Run the power monitor.""" try: pynvml.nvmlInit() nvml_handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in nvml_gpu_indices] # Use line buffering. 
with open(power_csv, "w", buffering=1) as power_f: while True: power: list[float] = [] now = time() for nvml_handle in nvml_handles: power.append(pynvml.nvmlDeviceGetPowerUsage(nvml_handle)) power_str = ",".join(map(lambda p: str(p / 1000), power)) power_f.write(f"{now},{power_str}\n") if (sleep_time := update_period - (time() - now)) > 0: sleep(sleep_time) except KeyboardInterrupt: return finally: pynvml.nvmlShutdown()
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/monitor/power.py
power.py
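A minimal usage sketch for the PowerMonitor above. It assumes a machine with at least one NVIDIA GPU visible to NVML; the GPU index and the two-second window are illustrative.

import time

from zeus.monitor.power import PowerMonitor

# update_period=None asks the monitor to infer the NVML counter period.
monitor = PowerMonitor(gpu_indices=[0], update_period=None)

start = time.time()
time.sleep(2.0)  # stand-in for real GPU work
end = time.time()

print("Latest power draw (W):", monitor.get_power())
print("Energy over the window (J):", monitor.get_energy(start, end))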
from __future__ import annotations

import time
import argparse
from datetime import datetime

import rich

from zeus.monitor.energy import ZeusMonitor
from zeus.monitor.power import PowerMonitor


def energy(gpu_indices: list[int] | None = None) -> None:
    """Measure the time and energy of GPUs using the ZeusMonitor.

    Args:
        gpu_indices: Indices of GPUs to monitor. If omitted, all GPUs will be monitored.
    """
    monitor = ZeusMonitor(gpu_indices)
    monitor.begin_window("zeus.monitor.energy")
    try:
        # "Forever"
        time.sleep(365 * 24 * 60 * 60)
    except KeyboardInterrupt:
        energy = monitor.end_window("zeus.monitor.energy")
        if energy is not None:
            rich.print("Total energy (J):", energy)


def power(
    gpu_indices: list[int] | None = None,
    update_period: float = 1.0,
) -> None:
    """Monitor the power consumption of GPUs for the duration of the CLI program.

    Args:
        gpu_indices: Indices of GPUs to monitor. If omitted, all GPUs will be monitored.
        update_period: The time between power measurements in seconds.
    """
    monitor = PowerMonitor(gpu_indices=gpu_indices, update_period=update_period)
    start_time = time.time()
    update_period = monitor.update_period

    def map_gpu_index_to_name(measurements: dict[int, float]) -> dict[str, float]:
        return {f"GPU{k}": v for k, v in measurements.items()}

    try:
        while True:
            time.sleep(update_period)
            power = monitor.get_power()
            if power is None:
                continue
            rich.print(datetime.now(), map_gpu_index_to_name(power))
    except KeyboardInterrupt:
        end_time = time.time()
        rich.print("\nTotal time (s):", end_time - start_time)
        energy = monitor.get_energy(start_time, end_time)
        if energy is not None:
            rich.print("Total energy (J):", map_gpu_index_to_name(energy))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog="python -m zeus.monitor",
        description="Zeus Monitor CLI",
    )

    # Subcommands for energy and power
    subparsers = parser.add_subparsers(
        dest="subcommand",
        required=True,
        help="The subcommand to run. See `zeus monitor <subcommand> --help` for more information.",
    )
    energy_parser = subparsers.add_parser(
        "energy",
        help="Measure the time and energy consumption of specified GPU indices.",
    )
    power_parser = subparsers.add_parser(
        "power",
        help="Monitor the power consumption of specified GPU indices, and compute total energy on exit (CTRL+C).",
    )

    # Arguments for energy
    energy_parser.add_argument(
        "--gpu-indices",
        nargs="+",
        type=int,
        help="Indices of GPUs to monitor. If omitted, all GPUs will be monitored.",
    )

    # Arguments for power
    power_parser.add_argument(
        "--gpu-indices",
        nargs="+",
        type=int,
        help="Indices of GPUs to monitor. If omitted, all GPUs will be monitored.",
    )
    power_parser.add_argument(
        "--update-period",
        type=float,
        default=1.0,
        help="The time between power measurements in seconds.",
    )

    args = parser.parse_args()

    # Dispatch to the appropriate subcommand
    if args.subcommand == "energy":
        energy(args.gpu_indices)
    elif args.subcommand == "power":
        power(args.gpu_indices, args.update_period)
    else:
        raise ValueError(f"Unknown subcommand: {args.subcommand}")
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/monitor/__main__.py
__main__.py
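The same subcommands can be driven programmatically, which is occasionally handy in scripts. A sketch; the GPU index and update period are illustrative, and both functions block until interrupted with CTRL+C, mirroring the CLI behavior.

from zeus.monitor.__main__ import power

# Equivalent to: python -m zeus.monitor power --gpu-indices 0 --update-period 0.5
# Blocks until KeyboardInterrupt, then prints total time and energy.
power(gpu_indices=[0], update_period=0.5)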
from __future__ import annotations

import os
import atexit
import logging
from time import time
from pathlib import Path
from dataclasses import dataclass
from functools import cached_property, lru_cache

import pynvml

from zeus.monitor.power import PowerMonitor
from zeus.util.logging import get_logger
from zeus.util.framework import cuda_sync
from zeus.util.env import resolve_gpu_indices


@dataclass
class Measurement:
    """Measurement result of one window.

    Attributes:
        time: Time elapsed (in seconds) during the measurement window.
        energy: Maps GPU indices to the energy consumed (in Joules) during the
            measurement window. GPU indices are from the DL framework's
            perspective after applying `CUDA_VISIBLE_DEVICES`.
    """

    time: float
    energy: dict[int, float]

    @cached_property
    def total_energy(self) -> float:
        """Total energy consumed (in Joules) during the measurement window."""
        return sum(self.energy.values())


class ZeusMonitor:
    """Measure the GPU energy and time consumption of a block of code.

    Works for multi-GPU and heterogeneous GPU types. Aware of `CUDA_VISIBLE_DEVICES`.
    For instance, if `CUDA_VISIBLE_DEVICES=2,3`, GPU index `1` passed into `gpu_indices`
    will be interpreted as CUDA device `3`.

    You can mark the beginning and end of a measurement window, during which the GPU
    energy and time consumed will be recorded. Multiple concurrent measurement windows
    are supported.

    For Volta or newer GPUs, energy consumption is measured very cheaply with the
    `nvmlDeviceGetTotalEnergyConsumption` API. On older architectures, this API is
    not supported, so a separate Python process is used to poll
    `nvmlDeviceGetPowerUsage` to get power samples over time, which are integrated
    to compute energy consumption.

    ## Integration Example

    ```python
    from zeus.monitor import ZeusMonitor

    # Time/Energy measurements for four GPUs will begin and end at the same time.
    gpu_indices = [0, 1, 2, 3]
    monitor = ZeusMonitor(gpu_indices)

    # Mark the beginning of a measurement window. You can use any string
    # as the window name, but make sure it's unique.
    monitor.begin_window("entire_training")

    # Actual work
    training(x, y)

    # Mark the end of a measurement window and retrieve the measurement result.
    result = monitor.end_window("entire_training")

    # Print the measurement result.
    print(f"Training took {result.time} seconds.")
    for gpu_idx, gpu_energy in result.energy.items():
        print(f"GPU {gpu_idx} consumed {gpu_energy} Joules.")
    ```

    Attributes:
        gpu_indices (`list[int]`): Indices of all the CUDA devices to monitor, from the
            DL framework's perspective after applying `CUDA_VISIBLE_DEVICES`.
        nvml_gpu_indices (`list[int]`): Indices of all the CUDA devices to monitor, from
            NVML/system's perspective.
    """

    def __init__(
        self,
        gpu_indices: list[int] | None = None,
        approx_instant_energy: bool = False,
        log_file: str | Path | None = None,
    ) -> None:
        """Instantiate the monitor.

        Args:
            gpu_indices: Indices of all the CUDA devices to monitor. Time/Energy
                measurements will begin and end at the same time for these GPUs
                (i.e., synchronized). If None, all the GPUs available will be used.
                `CUDA_VISIBLE_DEVICES` is respected if set, e.g., GPU index `1` passed
                into `gpu_indices` when `CUDA_VISIBLE_DEVICES=2,3` will be interpreted
                as CUDA device `3`. Only `CUDA_VISIBLE_DEVICES` values formatted as
                comma-separated indices are supported.
approx_instant_energy: When the execution time of a measurement window is shorter than the NVML energy counter's update period, energy consumption may be observed as zero. In this case, if `approx_instant_energy` is True, the window's energy consumption will be approximated by multiplying the current instantaneous power consumption with the window's execution time. This should be a better estimate than zero, but it's still an approximation. log_file: Path to the log CSV file. If `None`, logging will be disabled. """ # Save arguments. self.approx_instant_energy = approx_instant_energy # Initialize NVML. pynvml.nvmlInit() atexit.register(pynvml.nvmlShutdown) # CUDA GPU indices and NVML GPU indices are different if `CUDA_VISIBLE_DEVICES` is set. self.gpu_indices, self.nvml_gpu_indices = resolve_gpu_indices(gpu_indices) # Save all the NVML GPU handles. These should be called with system-level GPU indices. self.gpu_handles: dict[int, pynvml.c_nvmlDevice_t] = {} for nvml_gpu_index, gpu_index in zip(self.nvml_gpu_indices, self.gpu_indices): handle = pynvml.nvmlDeviceGetHandleByIndex(nvml_gpu_index) self.gpu_handles[gpu_index] = handle # Initialize loggers. self.logger = get_logger(type(self).__name__) if log_file is None: self.log_file = None else: if dir := os.path.dirname(log_file): os.makedirs(dir, exist_ok=True) self.log_file = open(log_file, "w") self.logger.info("Writing measurement logs to %s.", log_file) self.log_file.write( f"start_time,window_name,elapsed_time,{','.join(map(lambda i: f'gpu{i}_energy', self.gpu_indices))}\n", ) self.log_file.flush() self.logger.info("Monitoring GPU %s.", self.gpu_indices) # A dictionary that maps the string keys of active measurement windows to # the state of the measurement window. Each element in the dictionary is a tuple of: # 1) Time elapsed at the beginning of this window. # 2) Total energy consumed by each >= Volta GPU at the beginning of # this window (`None` for older GPUs). self.measurement_states: dict[str, tuple[float, dict[int, float]]] = {} # Initialize power monitors for older architecture GPUs. old_gpu_indices = [ gpu_index for gpu_index, is_new in zip(self.gpu_indices, self._is_new_arch_flags) if not is_new ] if old_gpu_indices: self.power_monitor = PowerMonitor( gpu_indices=old_gpu_indices, update_period=None ) else: self.power_monitor = None @lru_cache def _is_new_arch(self, gpu: int) -> bool: """Return whether the GPU is Volta or newer.""" return ( pynvml.nvmlDeviceGetArchitecture(self.gpu_handles[gpu]) >= pynvml.NVML_DEVICE_ARCH_VOLTA ) @cached_property def _is_new_arch_flags(self) -> list[bool]: """A list of flags indicating whether each GPU is Volta or newer.""" return [self._is_new_arch(gpu) for gpu in self.gpu_handles] def _get_instant_power(self) -> tuple[dict[int, float], float]: """Measure the power consumption of all GPUs at the current time.""" power_measurement_start_time: float = time() power = { i: pynvml.nvmlDeviceGetPowerUsage(h) / 1000.0 for i, h in self.gpu_handles.items() } power_measurement_time = time() - power_measurement_start_time return power, power_measurement_time def begin_window(self, key: str, sync_cuda: bool = True) -> None: """Begin a new measurement window. Args: key: Unique name of the measurement window. sync_cuda: Whether to synchronize CUDA before starting the measurement window. (Default: `True`) """ # Make sure the key is unique. if key in self.measurement_states: raise ValueError(f"Measurement window '{key}' already exists") # Call cudaSynchronize to make sure we freeze at the right time. 
        if sync_cuda:
            for gpu_index in self.gpu_handles:
                cuda_sync(gpu_index)

        # Freeze the start time of the profiling window.
        timestamp: float = time()
        energy_state: dict[int, float] = {}
        for gpu_index, gpu_handle in self.gpu_handles.items():
            # Query energy directly if the GPU has newer architecture.
            # Otherwise, the Zeus power monitor is running in the background to
            # collect power consumption, so we just need to read the log file later.
            if self._is_new_arch(gpu_index):
                energy_state[gpu_index] = (
                    pynvml.nvmlDeviceGetTotalEnergyConsumption(gpu_handle) / 1000.0
                )

        # Add measurement state to dictionary.
        self.measurement_states[key] = (timestamp, energy_state)
        self._log(f"Measurement window '{key}' started.")

    def end_window(
        self, key: str, sync_cuda: bool = True, cancel: bool = False
    ) -> Measurement:
        """End a measurement window and return the time and energy consumption.

        Args:
            key: Name of an active measurement window.
            sync_cuda: Whether to synchronize CUDA before ending the measurement window.
                (Default: `True`)
            cancel: Whether to cancel the measurement window. If `True`, the measurement
                window is assumed to be cancelled and discarded. Thus, an empty Measurement
                object will be returned and the measurement window will not be recorded in
                the log file either. `sync_cuda` is still respected.
        """
        # Retrieve the start time and energy consumption of this window.
        try:
            start_time, start_energy = self.measurement_states.pop(key)
        except KeyError:
            raise ValueError(f"Measurement window '{key}' does not exist") from None

        # Take instant power consumption measurements.
        # In theory, this introduces an extra NVML call in the critical path even
        # when the computation time is not short. However, it is reasonable to
        # expect the computation time to be short if the user explicitly turned on
        # the `approx_instant_energy` option. Calling this function as early as
        # possible leads to a more accurate energy approximation.
        power, power_measurement_time = (
            self._get_instant_power() if self.approx_instant_energy else ({}, 0.0)
        )

        # Call cudaSynchronize to make sure we freeze at the right time.
        if sync_cuda:
            for gpu_index in self.gpu_handles:
                cuda_sync(gpu_index)

        # If the measurement window is cancelled, return an empty Measurement object.
        if cancel:
            self._log(f"Measurement window '{key}' cancelled.")
            return Measurement(time=0.0, energy={gpu: 0.0 for gpu in self.gpu_handles})

        end_time: float = time()
        time_consumption: float = end_time - start_time

        energy_consumption: dict[int, float] = {}
        for gpu_index, gpu_handle in self.gpu_handles.items():
            # Query energy directly if the GPU has newer architecture.
            if self._is_new_arch(gpu_index):
                end_energy = (
                    pynvml.nvmlDeviceGetTotalEnergyConsumption(gpu_handle) / 1000.0
                )
                energy_consumption[gpu_index] = end_energy - start_energy[gpu_index]

        # If there are older GPU architectures, the PowerMonitor will take care of those.
        if self.power_monitor is not None:
            energy = self.power_monitor.get_energy(start_time, end_time)
            # Fall back to the instant power measurement if the PowerMonitor does
            # not have the power samples.
            if energy is None:
                energy = {gpu: 0.0 for gpu in self.power_monitor.gpu_indices}
            energy_consumption |= energy

        # Approximate energy consumption if the measurement window was too short.
        if self.approx_instant_energy:
            for gpu_index in self.gpu_indices:
                if energy_consumption[gpu_index] == 0.0:
                    energy_consumption[gpu_index] = power[gpu_index] * (
                        time_consumption - power_measurement_time
                    )

        self._log(f"Measurement window '{key}' ended.")

        # Add to log file.
if self.log_file is not None: self.log_file.write( f"{start_time},{key},{time_consumption}," + ",".join(str(energy_consumption[gpu]) for gpu in self.gpu_indices) + "\n" ) self.log_file.flush() return Measurement(time_consumption, energy_consumption) def _log( self, message: str, gpu_index: int | None = None, level: int = logging.INFO ) -> None: """Print out message with prefix. Args: message: The message to log out. gpu_index: The index of GPU for GPU-level logging. Should be `None` when logging global information. (Default: `None`) level: The logging level to use. (Default: `logging.INFO`) """ if gpu_index is not None: message = f"[GPU {gpu_index}] {message}" self.logger.log(level, message)
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/zeus/monitor/energy.py
energy.py
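A sketch of overlapping measurement windows with ZeusMonitor, assuming a single visible CUDA device; the window names and the placeholder workload are illustrative.

from zeus.monitor import ZeusMonitor

monitor = ZeusMonitor(gpu_indices=[0], approx_instant_energy=True)

monitor.begin_window("epoch")
for step in range(10):
    monitor.begin_window("step")  # windows may nest or overlap freely
    ...  # one training step
    step_result = monitor.end_window("step")
epoch_result = monitor.end_window("epoch")

print(f"Epoch took {epoch_result.time:.1f} s and {epoch_result.total_energy:.1f} J")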
import argparse import os import warnings from datetime import datetime, timedelta from pathlib import Path import datasets import numpy as np import pandas as pd def main(output_dir: str) -> None: """Run the main routine.""" # Prepare raw dataset print("Preparing raw dataset.") df = datasets.load_dataset("sentiment140")["train"].to_pandas() with warnings.catch_warnings(): warnings.filterwarnings("ignore") df["date"] = pd.to_datetime(df.date) df["ind"] = df.index df = df.set_index("date") df = df.sort_index() df = df.rename({"sentiment": "label"}, axis=1) df = df.drop(["user", "query"], axis=1) # Slice datasets stride = timedelta(days=1) size = 500_000 print(f"Slicing datasets with stride {stride} and size {size}.") slice_index = 1 sliced = [] # Skip April since there are too many days with no tweets. now = datetime(year=2009, month=5, day=1) end = df.index.max() while now < end: loc = df.index.get_loc(now.strftime("%m/%d/%Y")).start slice_df = df.iloc[loc : loc + size] if len(slice_df) < size: break # Compute sample overlap ratio if sliced: overlap = len(sliced[-1].merge(slice_df, how="inner", on="ind")) print( f"{slice_index:2d}: {slice_df.index.min()} ~ {slice_df.index.max()}, overlap = {overlap/size:.3f}" ) else: print(f"{slice_index:2d}: {slice_df.index.min()} ~ {slice_df.index.max()}") sliced.append(slice_df) slice_index += 1 now += stride print(f"{len(sliced)} datasets of size {size} were created.") # Split train and validation sets and save print("Sampling validation set and saving.") seed = 42 train_frac = 0.85 save_dir = Path(output_dir) os.makedirs(save_dir, exist_ok=True) for slice_index, dataset in enumerate(sliced): ind = np.random.default_rng(seed).permutation(len(dataset)) shuffled = dataset.iloc[ind] boundary = int(len(dataset) * train_frac) train = shuffled.iloc[:boundary].drop(["ind"], axis=1).reset_index() val = shuffled.iloc[boundary:].drop(["ind"], axis=1).reset_index() train.to_json( save_dir / f"{slice_index+1}_train.json", orient="records", lines=True ) val.to_json( save_dir / f"{slice_index+1}_val.json", orient="records", lines=True ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output-dir", "-o", required=True, help="Directory to save Capriccio" ) args = parser.parse_args() main(args.output_dir)
zeus-ml
/zeus-ml-0.7.0.tar.gz/zeus-ml-0.7.0/capriccio/generate.py
generate.py
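The slices written above are plain JSON-lines files, so consuming them only needs pandas. A sketch, assuming generate.py was run with --output-dir data/; the directory and slice number are illustrative.

import pandas as pd

train = pd.read_json("data/1_train.json", orient="records", lines=True)
val = pd.read_json("data/1_val.json", orient="records", lines=True)
# With size=500_000 and train_frac=0.85: 425,000 train and 75,000 val rows.
print(len(train), len(val))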
from collections.abc import Iterable as IterableType

__author__ = "Dmitry Veselov"
__version__ = "0.1.1"


class Element(object):

    template = "<{name}{attributes}>{content}</{name}>"

    def __init__(self):
        self.attributes = None
        self.content = None

    def __lshift__(self, attributes):
        """
        Used for setting element attributes, like #id or .class.
        """
        if not isinstance(attributes, dict):
            message = "Element attributes must be a dictionary: '{0}'"
            raise ValueError(message.format(attributes))
        else:
            self.attributes = attributes
        return self  # allows chaining our magic calls

    def __rshift__(self, content):
        """
        Used for filling content of element, e.g. add <li> tags into <ul>.
        """
        if isinstance(content, str):
            # plain text (html?) content
            self.content = content
        elif isinstance(content, (Element, ElementProxy)):
            self.content = str(content)
        else:
            if content:
                if isinstance(content, IterableType):
                    # render each element in @content iterable
                    self.content = "".join(map(str, content))
                else:
                    self.content = str(content)
            else:
                self.content = ""
        return self

    def __str__(self):
        if self.attributes:
            attributes = " ".join(self.rendered_attributes)
            if attributes:
                attributes = " " + attributes
        else:
            attributes = ""
        content = self.content or ""
        return self.template.format(name=self.element_name,
                                    attributes=attributes,
                                    content=content)

    def __repr__(self):
        return str(self)

    # rendering helpers

    @property
    def rendered_attributes(self):
        for name, value in self.attributes.items():
            if isinstance(value, bool):
                if value:
                    template = "{name}"
                else:
                    continue
            else:
                template = "{name}=\"{value}\""
            yield template.format(name=name, value=value)

    @property
    def element_name(self):
        return self.__class__.__name__.lower()


# HTML elements, from https://developer.mozilla.org/en/docs/Web/HTML/Element

class HTML(Element): pass

class HTML5(Element):
    # same as HTML element, but with the HTML5 doctype declaration
    template = "<!DOCTYPE html><html{attributes}>{content}</html>"

class Comment(Element):
    template = "<!-- {content} -->"

class Base(Element):
    template = "<{name}{attributes}>"

class Head(Element): pass

class Link(Element):
    template = "<{name}{attributes}>"

class Meta(Element):
    template = "<{name}{attributes}>"

class Style(Element): pass
class Title(Element): pass
class Address(Element): pass
class Article(Element): pass
class Body(Element): pass
class Footer(Element): pass
class Header(Element): pass
class H1(Element): pass
class H2(Element): pass
class H3(Element): pass
class H4(Element): pass
class H5(Element): pass
class H6(Element): pass
class HGroup(Element): pass
class Nav(Element): pass
class Section(Element): pass
class Blockquote(Element): pass
class DD(Element): pass
class Div(Element): pass
class DL(Element): pass
class DT(Element): pass
class Figcaption(Element): pass
class Figure(Element): pass

class HR(Element):
    template = "<{name}{attributes}>"

class Li(Element): pass
class Main(Element): pass
class OL(Element): pass
class P(Element): pass
class Pre(Element): pass
class UL(Element): pass
class A(Element): pass
class Abbr(Element): pass
class B(Element): pass
class Bdi(Element): pass
class Bdo(Element): pass

class Br(Element):
    template = "<{name}>"

class Cite(Element): pass
class Code(Element): pass
class Data(Element): pass
class Dfn(Element): pass
class Em(Element): pass
class I(Element): pass
class Kbd(Element): pass
class Mark(Element): pass
class Q(Element): pass
class Rp(Element): pass
class Rt(Element): pass
class Ruby(Element): pass
class S(Element): pass
class Samp(Element): pass
class Small(Element): pass
class Span(Element): pass
class Strong(Element): pass
class Sub(Element): pass
class Sup(Element): pass
class Time(Element): pass
class U(Element): pass
class Var(Element): pass

class Wbr(Element):
    template = "<{name}>"

class Area(Element):
    template = "<{name}{attributes}/>"

class Audio(Element): pass

class Img(Element):
    template = "<{name}{attributes}/>"

class Map(Element): pass

class Track(Element):
    template = "<{name}{attributes}>"

class Video(Element): pass

class Embed(Element):
    template = "<{name}{attributes}>"

class Iframe(Element): pass

class Object_(Element):
    template = "<object{attributes}>{content}</object>"

class Param(Element):
    template = "<{name}{attributes}>"

class Source(Element):
    template = "<{name}{attributes}>"

class Canvas(Element): pass
class Noscript(Element): pass
class Script(Element): pass
class Del(Element): pass
class Ins(Element): pass
class Caption(Element): pass

class Col(Element):
    template = "<{name}{attributes}>"

class Colgroup(Element): pass
class Table(Element): pass
class Tbody(Element): pass
class Td(Element): pass
class Tfoot(Element): pass
class Th(Element): pass
class Thead(Element): pass
class Tr(Element): pass
class Button(Element): pass
class Datalist(Element): pass
class Fieldset(Element): pass
class Form(Element): pass

class Input(Element):
    template = "<{name}{attributes}>"

class Keygen(Element):
    template = "<{name}{attributes}>"

class Label(Element): pass
class Legend(Element): pass
class Meter(Element): pass
class Optgroup(Element): pass
class Option(Element): pass
class Output(Element): pass
class Progress(Element): pass
class Select(Element): pass
class Textarea(Element): pass


class ElementProxy(object):

    def __init__(self, cls):
        self.cls = cls

    def __lshift__(self, attributes):
        element = self.cls()
        element << attributes
        return element

    def __rshift__(self, content):
        element = self.cls()
        element >> content
        return element

    def __str__(self):
        return str(self.cls())

    def __repr__(self):
        return repr(self.cls())


html = ElementProxy(HTML)
html5 = ElementProxy(HTML5)
base = ElementProxy(Base)
head = ElementProxy(Head)
link = ElementProxy(Link)
meta = ElementProxy(Meta)
style = ElementProxy(Style)
title = ElementProxy(Title)
address = ElementProxy(Address)
article = ElementProxy(Article)
body = ElementProxy(Body)
footer = ElementProxy(Footer)
header = ElementProxy(Header)
nav = ElementProxy(Nav)
section = ElementProxy(Section)
blockquote = ElementProxy(Blockquote)
dd = ElementProxy(DD)
div = ElementProxy(Div)
dl = ElementProxy(DL)
dt = ElementProxy(DT)
figcaption = ElementProxy(Figcaption)
figure = ElementProxy(Figure)
hr = ElementProxy(HR)
li = ElementProxy(Li)
main = ElementProxy(Main)
ol = ElementProxy(OL)
p = ElementProxy(P)
pre = ElementProxy(Pre)
ul = ElementProxy(UL)
a = ElementProxy(A)
abbr = ElementProxy(Abbr)
b = ElementProxy(B)
bdi = ElementProxy(Bdi)
bdo = ElementProxy(Bdo)
br = ElementProxy(Br)
cite = ElementProxy(Cite)
code = ElementProxy(Code)
data = ElementProxy(Data)
dfn = ElementProxy(Dfn)
em = ElementProxy(Em)
i = ElementProxy(I)
kbd = ElementProxy(Kbd)
mark = ElementProxy(Mark)
q = ElementProxy(Q)
rp = ElementProxy(Rp)
rt = ElementProxy(Rt)
ruby = ElementProxy(Ruby)
s = ElementProxy(S)
samp = ElementProxy(Samp)
small = ElementProxy(Small)
span = ElementProxy(Span)
strong = ElementProxy(Strong)
sub = ElementProxy(Sub)
sup = ElementProxy(Sup)
time = ElementProxy(Time)
u = ElementProxy(U)
var = ElementProxy(Var)
wbr = ElementProxy(Wbr)
area = ElementProxy(Area)
audio = ElementProxy(Audio)
img = ElementProxy(Img)
map_ = ElementProxy(Map)
track = ElementProxy(Track)
video = ElementProxy(Video)
embed = ElementProxy(Embed)
iframe = ElementProxy(Iframe)
object_ = ElementProxy(Object_)
param = ElementProxy(Param)
source = ElementProxy(Source)
canvas = ElementProxy(Canvas)
noscript = ElementProxy(Noscript)
script = ElementProxy(Script)
del_ = ElementProxy(Del)
ins = ElementProxy(Ins)
caption = ElementProxy(Caption)
col = ElementProxy(Col)
colgroup = ElementProxy(Colgroup)
table = ElementProxy(Table)
tbody = ElementProxy(Tbody)
td = ElementProxy(Td)
tfoot = ElementProxy(Tfoot)
th = ElementProxy(Th)
thead = ElementProxy(Thead)
tr = ElementProxy(Tr)
button = ElementProxy(Button)
datalist = ElementProxy(Datalist)
fieldset = ElementProxy(Fieldset)
form = ElementProxy(Form)
input = ElementProxy(Input)
keygen = ElementProxy(Keygen)
label = ElementProxy(Label)
legend = ElementProxy(Legend)
meter = ElementProxy(Meter)
optgroup = ElementProxy(Optgroup)
option = ElementProxy(Option)
output = ElementProxy(Output)
progress = ElementProxy(Progress)
select = ElementProxy(Select)
textarea = ElementProxy(Textarea)
h1 = ElementProxy(H1)
h2 = ElementProxy(H2)
h3 = ElementProxy(H3)
h4 = ElementProxy(H4)
h5 = ElementProxy(H5)
h6 = ElementProxy(H6)
hgroup = ElementProxy(HGroup)
zeus
/zeus-0.1.1.tar.gz/zeus-0.1.1/zeus.py
zeus.py
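A short sketch of the << / >> operators defined above; the attribute values and text are illustrative.

from zeus import div, li, p, ul

page = div << {"id": "main"} >> [
    p >> "Hello, world",
    ul >> [li >> str(n) for n in range(3)],
]
print(page)
# <div id="main"><p>Hello, world</p><ul><li>0</li><li>1</li><li>2</li></ul></div>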
import json
import sys

from zeusai_py.io import client
from zeusai_py import exceptions


def _send_request(endpoint: str, params) -> None:
    """Helper function to send a request to the host.

    :param endpoint: String, the endpoint to send the request for.
    :param params: The appropriate params for the endpoint.
    :return: None
    """
    sys.stdout.write(json.dumps({"endpoint": endpoint, "params": params}))
    sys.stdout.flush()


def _get_response() -> dict:
    """Helper function that waits for and returns a response, or raises an exception.

    Note that this blocks until a response is received from the host.

    :raise: InvalidEndpoint, InvalidParams, ForbiddenInStandalone, NotImplementedError
    :return: Dictionary containing the response from the server.
    """
    # Get and load the input
    response = sys.stdin.readline()
    response = json.loads(response)
    if response["endpoint"] == "ERROR":
        params = response["params"]
        error = params["error"]
        # Raise different exceptions depending on the error.
        if error == "invalid endpoint":
            raise exceptions.InvalidEndpoint
        elif error == "invalid params":
            raise exceptions.InvalidParams
        elif error == "forbidden in standalone":
            raise exceptions.ForbiddenInStandalone
        elif error == "not implemented":
            raise NotImplementedError
        else:
            raise exceptions.PluginError
    else:
        # If it's not an error, verify the validity of the response.
        try:
            _ = response["endpoint"]
            _ = response["params"]
        except KeyError:
            raise exceptions.InvalidResponse
        return response


def variable(variables: list) -> dict:
    """Gets the values of a list of variables from the host.

    :param variables: List of variables to get the values of
    :type variables: List
    :raise: See _get_response().
    :return: Dictionary, containing {key: value} pairs: {variable_name: value}
    """
    _send_request("variable", variables)
    response = _get_response()
    return response["params"]


def followup(question: str) -> str:
    """Ask the user a followup question, and get their response.

    :param question: The question to ask the user.
    :type question: String
    :raise: See _get_response().
    :return: The user's response.
    """
    _send_request("followup", question)
    response = _get_response()
    return response["params"]


def simulate_client() -> client.Client:
    """Switches the way requests to and from the server are processed to use
    the I/O API instead of the Plugin API.

    NOTE: THIS IS IRREVERSIBLE! ONCE THE PLUGIN IS USING THE CLIENT API,
    IT CANNOT GO BACK TO THE PLUGIN API.

    :raise: See _get_response().
    :return: Instance of Client
    """
    _send_request("simulate client", "")
    _get_response()
    return client.Client()


def log(level: str, message: str) -> None:
    """Logs `message` with the priority of `level` to the Log File and console,
    respecting the user's settings.

    `message` is always prefixed with the plugin's name before logging, so it's
    clear where it's coming from.

    :param level: The logging level, options are: debug
    :type level: String
    :param message: The message to log.
    :type message: String
    :raise: See _get_response().
    :return: None
    """
    params = {"level": level, "message": message}
    _send_request("log", params)
    _get_response()
zeusai-py
/zeusai_py-0.0.1.1-py3-none-any.whl/zeusai_py/plugin/plugin.py
plugin.py
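A hedged sketch of a plugin built on the helpers above. The variable names, question, and log level are illustrative, and the snippet assumes it is launched by a ZeusAI host speaking the JSON-over-stdio protocol that _send_request and _get_response implement.

from zeusai_py.plugin import plugin

# Ask the host for two variables, then follow up with the user.
values = plugin.variable(["user_name", "location"])
answer = plugin.followup(f"Did you mean {values['location']}?")
plugin.log("debug", f"Followup answer: {answer}")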
import socket import json import threading from zeusai_py.io import _socket_io class Client: def __init__(self, host: str, port: int) -> None: """ The class that handles connecting to and interacting with the ZeusAI server's I/O API. :param host: :param port: :return: """ self.host = host self.port = port self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.conn.connect((self.host, self.port)) self.reader = _socket_io.SocketStreamReader(self.conn) self.output_func = None self.recv_thread = threading.Thread(target=self._recv_loop) def authenticate(self, username: str, password: str) -> None: """ Authenticate with the ZeusAI server. :param username: :param password: :return: """ request_dict = {"endpoint": "auth", "params": {"user": username, "pass": password}} self._send_request(request_dict) def input(self, input_: str) -> None: """ Provide an input to the AI :param input_: String containing the user's input. :return: """ request_dict = {"endpoint": "input", "params": input_} self._send_request(request_dict) def set_output_func(self, func) -> None: """ Set the output function to func. :param func: a function to be called by the recv_thread when the AI provides an output to the user. Should take a param for a string containing the AI output. :return: None """ self.output_func = func def _get_response(self) -> dict: response = self.reader.readline() response = json.loads(response) return response def _send_request(self, request_dict: dict) -> None: """Serializes request_dict into a JSON bytes object and sends it to the server. :param request_dict: Dictionary containing a valid request for the ZeusAI Server API :return: None """ serialized_json = json.dumps(request_dict) + "\n" serialized_json = serialized_json.encode("utf8") self.conn.sendall(serialized_json) def _recv_loop(self) -> None: """ Loop which runs in a thread to get output from the AI server. Takes a set of functions which are called when the associated endpoint is called.""" while True: response = self._get_response() if response["endpoint"] == "output": self.output_func(response["params"])
zeusai-py
/zeusai_py-0.0.1.1-py3-none-any.whl/zeusai_py/io/client.py
client.py
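A sketch of driving Client end to end; the host, port, and credentials are illustrative. Note that __init__ only constructs recv_thread, so the caller starts it here after registering an output callback.

from zeusai_py.io.client import Client

client = Client("127.0.0.1", 8400)
client.set_output_func(lambda text: print("AI:", text))
client.recv_thread.start()  # begin dispatching "output" responses

client.authenticate("alice", "correct horse battery staple")
client.input("Hello!")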
__version__ = "0.1.1" __description__ = """Core utilites for https://app.zeusapi.io, a service that allows users to rapidly create microservices from legacy Python code.""" import functools from io import StringIO import json import pandas as pd def zeus_data( _func=None, *, name=None, file_type=None, in_library=True, local=True, zeus_payload={} ): """Define a function to be a data source. If local, we will load the data locally, as defined in the function; otherwise we will expect it to be passed in via the POST request.""" def decorator_zeus_data(func): @functools.wraps(func) def wrapper_zeus_data(*args, **kwargs): if local: # Let the usual function handle it. value = func(*args, **kwargs) else: # We will process the incoming, posted file. if file_type.lower() == "csv": # Create a pandas dataframe. datafile = zeus_payload[name].decode() datafile_io = StringIO(datafile) csv_dataframe = pd.read_csv(datafile_io) value = csv_dataframe elif file_type.lower() == "json": # Decode the data and return as a dictionary. value = zeus_payload[name] return value return wrapper_zeus_data if _func is None: return decorator_zeus_data else: return decorator_zeus_data(_func) def zeus_endpoint(_func=None, *, name=None): """Define a function to be a data source. If local, we will load the data locally; otherwise we will expect it to be passed in via the POST request.""" def decorator_zeus_endpoint(func): @functools.wraps(func) def wrapper_zeus_endpoint(*args, **kwargs): value = func(*args, **kwargs) # value["age"] = 45 return value return wrapper_zeus_endpoint if _func is None: return decorator_zeus_endpoint else: return decorator_zeus_endpoint(_func)
zeusapi
/zeusapi-0.1.1.tar.gz/zeusapi-0.1.1/zeusapi.py
zeusapi.py
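A hedged sketch of the two decorators in use; the function names, CSV file, and column are illustrative. With local=True the loader runs as written; flipping local=False would instead pull the posted file out of zeus_payload.

import pandas as pd

from zeusapi import zeus_data, zeus_endpoint

@zeus_data(name="sales", file_type="csv", local=True)
def load_sales():
    return pd.read_csv("sales.csv")

@zeus_endpoint(name="total_sales")
def total_sales():
    # The endpoint returns a plain dict, which the wrapper passes through.
    return {"total": float(load_sales()["amount"].sum())}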
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] WithSecure Oyj. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/LICENSE.md
LICENSE.md
from collections import defaultdict
import z3
import logging
from pprint import pformat
import re
import string
from typing import List, Any, Union, Dict, Optional

from zeuscloud_iamspy.iam import (
    Document,
    GroupDetail,
    RoleDetail,
    Statements,
    Effects,
    AuthorizationDetails,
    UserDetail,
)
from zeuscloud_iamspy.conditions import condition_functions
from zeuscloud_iamspy.datatypes import parse_string
from pydantic.json import pydantic_encoder

# equivalent to chars in string.ascii_letters + string.digits + string.punctuation
ANY = z3.Range("!", "~")

logger = logging.getLogger("iamspy.parse")

used_conditions = set()


def remove_suffix(input_string, suffix):
    if suffix and input_string.endswith(suffix):
        return input_string[: -len(suffix)]
    return input_string


def json_encoder(obj: Any, nested=True) -> Any:
    """
    For printing results
    """
    if nested:
        obj = pydantic_encoder(obj)
    if isinstance(obj, dict):
        for key in list(obj.keys()):
            if obj[key] is None:
                obj.pop(key)
            else:
                json_encoder(obj[key], False)
    elif isinstance(obj, list):
        while None in obj:
            obj.pop(obj.index(None))
        for x in obj:
            json_encoder(x, False)
    return obj


def _parse_condition(conditions: Dict[str, Dict[str, Union[str, List[str]]]]):
    """
    Map IAM condition keys to the condition functions in conditions.py.
    Ands together all the conditions present.
    """
    logger.debug(f"Parsing condition {pformat(conditions)}")
    items = []
    for test, variables in conditions.items():
        logger.debug(f"Condition test: {test}, variables: {variables}")
        if ":" in test:
            logger.warning(
                f"Multi key/value operator detected: {test}, this is currently not supported, skipping condition"
            )
            continue
        if if_exists := test.endswith("IfExists"):
            test = remove_suffix(test, "IfExists")
        for key, value in variables.items():
            logger.debug(f"Variable key: {key}, value: {value}")
            used_conditions.add(key)
            if not isinstance(value, list):
                value = [value]
            items.append(condition_functions[test](f"condition_{key}", value, if_exists=if_exists))
    return z3.simplify(z3.And(*items))


def _parse_statement(statement: Statements):
    """
    Parse an IAM statement block

    TODO: InvalidStatementError error handling
    """
    logger.debug(f"Parsing statement {statement.Sid}")
    if isinstance(statement.Action, str):
        statement.Action = [statement.Action]
    if isinstance(statement.NotAction, str):
        statement.NotAction = [statement.NotAction]

    a = z3.String("a")
    if statement.Action:
        actions = z3.Or([parse_string(a, action) for action in statement.Action])
    elif statement.NotAction:
        actions = z3.Not(z3.Or([parse_string(a, action) for action in statement.NotAction]))
    else:
        raise NotImplementedError()

    if isinstance(statement.Resource, str):
        statement.Resource = [statement.Resource]

    r = z3.String("r")
    if statement.Resource:
        resources = z3.Or([parse_string(r, resource) for resource in statement.Resource])
    elif statement.NotResource:
        resources = z3.Or([z3.Not(parse_string(r, resource)) for resource in statement.NotResource])
    else:
        resources = True

    s = z3.String("s")
    s_account = z3.String("s_account")
    if statement.Principal:
        # AWS principals cannot use wildcards, except "*" to specify all principals:
        # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
        # TODO: Make these into a z3.Or
        if "AWS" in statement.Principal:
            items = []
            for principal in statement.Principal["AWS"]:
                if principal == "*":
                    items.append(True)
                    continue
                # A bare 12-digit account ID is shorthand for the account root.
                if re.fullmatch(r"[0-9]{12}", principal):
                    principal = f"arn:aws:iam::{principal}:root"
                try:
                    account_id = principal.split(":")[4]
                except IndexError:
                    account_id = "*"
                if principal.endswith(":root"):
                    items.append(z3.And(z3.Bool("identity"), parse_string(s_account, account_id, wildcard=False)))
                else:
                    items.append(
                        z3.And(parse_string(s, principal, wildcard=False), s_account == z3.StringVal(account_id))
                    )
            principals = z3.Or(*items)
        elif "Service" in statement.Principal:
            principals = z3.Or(
                [parse_string(s, principal, wildcard=False) for principal in statement.Principal["Service"]]
            )
        elif "Federated" in statement.Principal:
            logger.warning(f"Federated principals {statement.Principal} currently not supported, skipping statement")
            principals = False
        else:
            raise NotImplementedError()
    else:
        principals = True

    if statement.Condition:
        conditions = _parse_condition(statement.Condition)
    else:
        conditions = True

    return z3.simplify(z3.And(actions, resources, principals, conditions))


def _parse_document(document: Document, identifier: str):
    """
    Parse an IAM document of multiple statements
    """
    # Conditions for a request to be allowed and denied respectively
    allow = []
    deny = []

    logger.info(f"Parsing policy document: {identifier}")
    doc = z3.Bool(identifier)
    # doc_allow and doc_deny are to allow querying later on for
    # what documents allow or deny what and where
    doc_allow = z3.Bool(f"allow_{identifier}")
    doc_deny = z3.Bool(f"deny_{identifier}")

    for stmt in document.Statement:
        parsed = _parse_statement(stmt)
        if stmt.Effect == Effects.ALLOW:
            allow.append(parsed)
        else:
            deny.append(parsed)

    return (
        doc == z3.And(doc_allow, doc_deny),
        doc_allow == z3.simplify(z3.Or(*allow)),
        doc_deny == z3.simplify(z3.Not(z3.Or(*deny))),
    )


def _parse_group(data: AuthorizationDetails, group: GroupDetail):
    logger.info(
        f"Parsing {group.Arn} with {len(group.GroupPolicyList)} inline and {len(group.AttachedManagedPolicies)} managed policies"
    )
    model = []
    identifiers = []
    for inline_policy in group.GroupPolicyList:
        logger.info(f"Parsing inline {group.Arn}_{inline_policy.PolicyName}")
        identifier = f"policy_identity_{group.Arn}_{inline_policy.PolicyName}"
        model.extend(_parse_document(inline_policy.PolicyDocument, identifier))
        identifiers.append(identifier)
        testing.add(identifier)

    for managed_policy in group.AttachedManagedPolicies:
        logger.info(f"Parsing managed {group.Arn}_{managed_policy.PolicyName}")
        assert f"identity_{managed_policy.PolicyArn}" in testing
        identifiers.append(f"identity_{managed_policy.PolicyArn}")

    testing.add(f"identity_{group.Arn}")
    g = z3.Bool(f"identity_{group.Arn}")
    group_allow = z3.Bool(f"allow_identity_{group.Arn}")
    group_deny = z3.Bool(f"deny_identity_{group.Arn}")

    identifiers_allow = [z3.Bool(f"allow_{x}") for x in identifiers]
    identifiers_deny = [z3.Bool(f"deny_{x}") for x in identifiers]

    model.extend(
        (
            g == z3.And(group_allow, group_deny),
            group_allow == z3.Or(*identifiers_allow),
            group_deny == z3.And(*identifiers_deny),
        )
    )
    return model


def _parse_role(data: AuthorizationDetails, role: RoleDetail):
    logger.info(
        f"Parsing {role.Arn} with {len(role.RolePolicyList)} inline and {len(role.AttachedManagedPolicies)} managed policies"
    )
    model = []
    identifiers = []
    for inline_policy in role.RolePolicyList:
        logger.info(f"Parsing inline {role.Arn}_{inline_policy.PolicyName}")
        identifier = f"policy_identity_{role.Arn}_{inline_policy.PolicyName}"
        model.extend(_parse_document(inline_policy.PolicyDocument, identifier))
        identifiers.append(identifier)
        testing.add(identifier)

    for managed_policy in role.AttachedManagedPolicies:
        logger.info(f"Parsing managed {role.Arn}_{managed_policy.PolicyName}")
        assert f"identity_{managed_policy.PolicyArn}" in testing
identifiers.append(f"identity_{managed_policy.PolicyArn}") testing.add(f"identity_{role.Arn}") r = z3.Bool(f"identity_{role.Arn}") role_allow = z3.Bool(f"allow_identity_{role.Arn}") role_deny = z3.Bool(f"deny_identity_{role.Arn}") identifiers_allow = [z3.Bool(f"allow_{x}") for x in identifiers] identifiers_deny = [z3.Bool(f"deny_{x}") for x in identifiers] permissions_boundary = z3.Bool(f"permissions_boundary_{role.Arn}") # Permissions boundaries never allow - they only deny if the policy does not allow if role.PermissionsBoundary: assert f"identity_{role.PermissionsBoundary.PermissionsBoundaryArn}" in testing permissions_boundary_constraint = permissions_boundary == z3.Bool( f"identity_{role.PermissionsBoundary.PermissionsBoundaryArn}" ) model.append(permissions_boundary_constraint) model.extend(parse_resource_policy(role.Arn, role.AssumeRolePolicyDocument)) model.extend( ( r == z3.And(role_allow, role_deny, permissions_boundary), role_allow == z3.Or(*identifiers_allow), role_deny == z3.And(*identifiers_deny), ) ) return model def _parse_user(data: AuthorizationDetails, user: UserDetail): logger.info( f"Parsing {user.Arn} with {len(user.UserPolicyList)} inline and {len(user.AttachedManagedPolicies)} managed policies, {len(user.GroupList)} groups" ) model = [] identifiers = [] for inline_policy in user.UserPolicyList: identifier = f"policy_identity_{user.Arn}_{inline_policy.PolicyName}" model.extend(_parse_document(inline_policy.PolicyDocument, identifier)) identifiers.append(identifier) testing.add(identifier) for managed_policy in user.AttachedManagedPolicies: assert f"identity_{managed_policy.PolicyArn}" in testing identifiers.append(f"identity_{managed_policy.PolicyArn}") for group_name in user.GroupList: group = next(x for x in data.GroupDetailList if x.GroupName == group_name) assert f"identity_{group.Arn}" in testing identifiers.append(f"identity_{group.Arn}") testing.add(f"identity_{user.Arn}") u = z3.Bool(f"identity_{user.Arn}") user_allow = z3.Bool(f"allow_identity_{user.Arn}") user_deny = z3.Bool(f"deny_identity_{user.Arn}") identifiers_allow = [z3.Bool(f"allow_{x}") for x in identifiers] identifiers_deny = [z3.Bool(f"deny_{x}") for x in identifiers] permissions_boundary = z3.Bool(f"permissions_boundary_{user.Arn}") # Permissions boundaries never allow - they only deny if the policy does not allow if user.PermissionsBoundary: assert f"identity_{user.PermissionsBoundary.PermissionsBoundaryArn}" in testing permissions_boundary_constraint = permissions_boundary == z3.Bool( f"identity_{user.PermissionsBoundary.PermissionsBoundaryArn}" ) model.append(permissions_boundary_constraint) model.extend( ( u == z3.And(user_allow, user_deny, permissions_boundary), user_allow == z3.Or(*identifiers_allow), user_deny == z3.And(*identifiers_deny), ) ) return model def parse_resource_policy(arn: str, doc: Document, account_id: Optional[str] = None): logger.info(f"Parsing resource policy for {arn}") if account_id is None: account_id = arn.split(":")[4] if not account_id: raise Exception(f"Missing account id for {arn}") return [ parse_string(z3.String(f"resource_{arn}_account"), account_id), *_parse_document(doc, f"resource_{arn}"), ] testing = set() def generate_model(data: AuthorizationDetails): """ Parses a GAAD, pulls out policies, users, groups etc """ logger.info( f"Generating model from GAAD output with {len(data.UserDetailList)} users, {len(data.GroupDetailList)} groups, {len(data.RoleDetailList)} roles, {len(data.Policies)} policies" ) model = [] for policy in data.Policies: document = 
next(x for x in policy.PolicyVersionList if x.IsDefaultVersion).Document
        model.extend(_parse_document(document, f"identity_{policy.Arn}"))
        testing.add(f"identity_{policy.Arn}")
    for group in data.GroupDetailList:
        model.extend(_parse_group(data, group))
    for role in data.RoleDetailList:
        model.extend(_parse_role(data, role))
    for user in data.UserDetailList:
        model.extend(_parse_user(data, user))
    logger.info(f"Used condition keys: {used_conditions}")
    return model


def generate_evaluation_logic_checks(model_vars, source: Optional[str], resource: str):
    logger.info(f"Generating evaluation logic checks for {source} against {resource}")
    constraints = []

    s_account = z3.String("s_account")
    s = z3.String("s")
    constraints.append(s_account == z3.SubString(s, 13, 12))

    resource_account = resource.split(":")[4]
    if resource_account:
        constraints.append(z3.String("r_account") == z3.StringVal(resource_account))
    else:
        constraints.append(z3.String("r_account") == z3.String(f"resource_{resource}_account"))

    # SCPs

    # Resource Policy
    resource_identifier = f"resource_{resource}"
    resource_check = z3.Bool(resource_identifier)
    constraints.append(z3.Bool("resource") == resource_check)
    constraints.append(z3.Bool(f"deny_resource_{resource}") == True)  # noqa: E712

    if resource.startswith("arn:aws:s3:::") and "/" in resource:
        bucket_resource = resource.split("/")[0]
        logger.info(f"Associating {bucket_resource} policy with bucket object {resource}")
        constraints.append(z3.Bool(f"resource_{resource}") == z3.Bool(f"resource_{bucket_resource}"))
        constraints.append(z3.Bool(f"allow_resource_{resource}") == z3.Bool(f"allow_resource_{bucket_resource}"))
        constraints.append(z3.Bool(f"deny_resource_{resource}") == z3.Bool(f"deny_resource_{bucket_resource}"))
        constraints.append(
            z3.String(f"resource_{resource}_account") == z3.String(f"resource_{bucket_resource}_account")
        )
        resource_identifier = f"resource_{bucket_resource}"

    if resource_identifier not in model_vars:
        logger.debug(f"Missing resource policy for {resource_identifier}, defaulting to False")
        constraints.append(resource_check == False)  # noqa: E712

    # Identity Policy
    identity_identifier = f"identity_{source}"
    identity_check = z3.And(z3.Bool(identity_identifier), z3.Bool(f"deny_identity_{source}"))
    if source:
        if identity_identifier not in model_vars:
            constraints.append(identity_check == False)  # noqa: E712
        source_account = source.split(":")[4]
        constraints.append(z3.String("s_account") == z3.StringVal(source_account))
    else:
        identities = [x for x in model_vars if x.startswith("identity")]
        identities = [
            x
            for x in identities
            if len(x.split(":")) > 4 and (x.split(":")[5].startswith("user") or x.split(":")[5].startswith("role"))
        ]
        identity_identifiers = [
            z3.And(
                z3.Bool(x),
                z3.Bool(f"deny_{x}"),
                # Slice off the "identity_" prefix; str.lstrip would strip a
                # *character set* rather than the prefix.
                s == x[len("identity_"):],
                z3.String("s_account") == z3.StringVal(x.split(":")[4]),
            )
            for x in identities
        ]
        identity_check = z3.Or(*identity_identifiers)
        # TODO: This is a temporary fix for whocan, at some point need to expand this
        # to do automatic wildcard resolution for accounts external to known
        constraints.append(
            z3.Or(
                *[
                    parse_string(s, x[len("identity_"):], wildcard=False, case_sensitive=True)
                    for x in identities
                ]
            )
        )

    constraints.append(z3.Bool("identity") == identity_check)

    # Boundary Policy

    # Session Policy

    constraints.append(z3.Or(resource_check, identity_check))
    constraints.append(
        # TODO: Add further cases where resource policy is always required
        z3.Or(
            z3.And(parse_string(z3.String("a"), "sts:assumerole"), resource_check),
            z3.Not(parse_string(z3.String("a"), "sts:assumerole")),
        )
    )
constraints.append( z3.Or( z3.And(z3.String("s_account") != z3.String("r_account"), resource_check), z3.String("s_account") == z3.String("r_account"), ) ) logger.debug(f"Evaluation logic constraints: {constraints}") return constraints
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/parse.py
parse.py
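The allow/deny combination built by _parse_role and _parse_user above reduces to a small Boolean pattern; a minimal sketch, assuming only the z3-solver package and hypothetical policy names p1/p2 (each deny_* variable is True when that policy does not deny):

import z3

solver = z3.Solver()
allow_p1, allow_p2 = z3.Bools("allow_p1 allow_p2")
deny_p1, deny_p2 = z3.Bools("deny_p1 deny_p2")  # True = "does not deny"
identity = z3.Bool("identity")

# Effective access: at least one policy allows, and no policy denies
solver.add(identity == z3.And(z3.Or(allow_p1, allow_p2), z3.And(deny_p1, deny_p2)))
# Can the identity act while p2 explicitly denies?
solver.add(identity, z3.Not(deny_p2))
print(solver.check())  # unsat - an explicit deny always wins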
from pydantic import Field, validator
from pydantic.dataclasses import dataclass
from datetime import datetime
from typing import Optional, List, Dict, Union, Any
from enum import Enum


class Effects(Enum):
    ALLOW = "Allow"
    DENY = "Deny"


@dataclass
class Statements:
    Sid: Optional[str] = None
    Effect: Effects = Field(Effects.DENY)
    Principal: Optional[Dict[str, List[str]]] = None
    NotPrincipal: Optional[Dict[str, List[str]]] = None
    Action: Optional[Union[str, List[str]]] = None
    NotAction: Optional[Union[str, List[str]]] = None
    Resource: Optional[Union[str, List[str]]] = None
    NotResource: Optional[Union[str, List[str]]] = None
    Condition: Optional[Dict[str, Dict[str, Union[str, List[str]]]]] = None

    @validator("Principal", pre=True)
    def principal_is_list(cls, v):
        if not v:
            return v
        if isinstance(v, str):
            v = {"AWS": v}
        for key, value in v.items():
            if isinstance(value, str):
                v[key] = [value]
        return v

    @validator("NotPrincipal", pre=True)
    def notprincipal_is_list(cls, v):
        if not v:
            return v
        if isinstance(v, str):
            v = {"AWS": v}
        for key, value in v.items():
            if isinstance(value, str):
                v[key] = [value]
        return v

    @validator("NotAction", always=True)
    def at_least_action_or_not_action(cls, v, values, **kwargs):
        if not ((values.get("Action", None) is not None) ^ (v is not None)):
            raise ValueError("Exactly one of Action or NotAction must be specified")
        return v


@dataclass
class Document:
    Version: Optional[str] = "2008-10-17"
    Id: Optional[str] = None
    Statement: List[Statements] = Field(default_factory=list)

    @validator("Statement", pre=True)
    def make_sure_statements_is_list(cls, v):
        if not isinstance(v, list):
            return [v]
        return v


@dataclass
class Policy:
    PolicyName: str
    PolicyDocument: Document


@dataclass
class ManagedPolicy:
    PolicyName: str
    PolicyArn: str


@dataclass
class PermissionBoundary:
    PermissionsBoundaryType: str = Field(..., regex="^Policy$")
    PermissionsBoundaryArn: str = Field(...)


@dataclass
class Tag:
    Key: str
    Value: str


@dataclass
class UserDetail:
    Path: str
    UserName: str
    UserId: str
    Arn: str
    CreateDate: datetime
    UserPolicyList: List[Policy] = Field(default_factory=list)
    GroupList: List[str] = Field(default_factory=list)
    AttachedManagedPolicies: List[ManagedPolicy] = Field(default_factory=list)
    PermissionsBoundary: Optional[PermissionBoundary] = None
    Tags: List[Tag] = Field(default_factory=list)


@dataclass
class GroupDetail:
    Path: str
    GroupName: str
    GroupId: str
    Arn: str
    CreateDate: datetime
    GroupPolicyList: List[Policy]
    AttachedManagedPolicies: List[ManagedPolicy]


@dataclass
class RoleLastUse:
    LastUsedDate: Optional[datetime] = None
    Region: Optional[str] = None


@dataclass
class RoleDetail:
    Path: str
    RoleName: str
    RoleId: str
    Arn: str
    CreateDate: datetime
    AssumeRolePolicyDocument: Document
    InstanceProfileList: List[Any]  # We don't care about this yet
    RolePolicyList: List[Policy] = Field(default_factory=list)
    AttachedManagedPolicies: List[ManagedPolicy] = Field(default_factory=list)
    PermissionsBoundary: Optional[PermissionBoundary] = None
    Tags: List[Tag] = Field(default_factory=list)
    RoleLastUsed: RoleLastUse = Field(...)


@dataclass
class PolicyVersion:
    Document: Document
    VersionId: str
    IsDefaultVersion: bool
    CreateDate: datetime


@dataclass
class PolicyDetail:
    PolicyName: str
    PolicyId: str
    Arn: str
    Path: str
    DefaultVersionId: str
    AttachmentCount: int
    PermissionsBoundaryUsageCount: int
    IsAttachable: bool
    Description: str = Field("")
    CreateDate: datetime = Field(...)
    UpdateDate: datetime = Field(...)
    PolicyVersionList: List[PolicyVersion] = Field(...)


@dataclass
class AuthorizationDetails:
    UserDetailList: List[UserDetail]
    GroupDetailList: List[GroupDetail]
    RoleDetailList: List[RoleDetail]
    Policies: List[PolicyDetail]


@dataclass
class ResourcePolicy:
    Resource: str
    Policy: Document
    Account: Optional[str] = Field(None)


def extract_applicable_policies(data: AuthorizationDetails, source_arn: str) -> List[Document]:
    """
    For a given ARN, walk the GAAD and collect every policy document that applies to it:
    attached managed policies, inline policies, and (for users) group policies.
    """
    source_type = source_arn.split(":")[5].split("/")[0]
    source: Union[RoleDetail, UserDetail]
    if source_type == "user":
        try:
            source = next(x for x in data.UserDetailList if x.Arn == source_arn)
            inline_policies = source.UserPolicyList
        except StopIteration:
            raise ValueError("Can't find Source ARN")
    elif source_type == "role":
        try:
            source = next(x for x in data.RoleDetailList if x.Arn == source_arn)
            inline_policies = source.RolePolicyList
        except StopIteration:
            raise ValueError("Can't find Source ARN")
    else:
        raise ValueError(f"Unsupported source type: {source_type}")

    applicable_policies = []
    for managed_policy in source.AttachedManagedPolicies:
        policy_arn = managed_policy.PolicyArn
        try:
            policy_details = next(x for x in data.Policies if policy_arn == x.Arn)
            policy_version = next(x for x in policy_details.PolicyVersionList if x.IsDefaultVersion)
        except StopIteration:
            continue
        applicable_policies.append(policy_version.Document)

    for inline_policy in inline_policies:
        applicable_policies.append(inline_policy.PolicyDocument)

    if isinstance(source, UserDetail):
        for name in source.GroupList:
            try:
                group = next(x for x in data.GroupDetailList if x.GroupName == name)
            except StopIteration:
                continue
            for managed_policy in group.AttachedManagedPolicies:
                policy_arn = managed_policy.PolicyArn
                try:
                    policy_details = next(x for x in data.Policies if policy_arn == x.Arn)
                    policy_version = next(x for x in policy_details.PolicyVersionList if x.IsDefaultVersion)
                except StopIteration:
                    continue
                applicable_policies.append(policy_version.Document)
            for policy in group.GroupPolicyList:
                applicable_policies.append(policy.PolicyDocument)

    return applicable_policies
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/iam.py
iam.py
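A short usage sketch for the dataclasses above, assuming pydantic v1 (the validator API used here); the pre-validators normalize a bare statement object and string principals into lists:

policy_json = {
    "Version": "2012-10-17",
    "Statement": {  # a single statement object rather than a list
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111111111111:root"},  # bare string value
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::example-bucket/*",
    },
}
doc = Document(**policy_json)
assert isinstance(doc.Statement, list) and len(doc.Statement) == 1
assert doc.Statement[0].Principal == {"AWS": ["arn:aws:iam::111111111111:root"]}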
import logging
import base64
import ipaddress
import string

import z3
from dateutil.parser import parse

# equivalent to chars in string.ascii_letters + string.digits + string.punctuation
ANY = z3.Range("!", "~")

logger = logging.getLogger("iamspy.datatypes")


def convert(type, data, **kwargs):
    return _converters[type](data, **kwargs)


def parse_string(item, string, wildcard=True, case_sensitive=False):
    """
    Parsing strings to unpack wildcards, so that the end result is a set of components
    that are either regexes of strings or z3.Star() wildcard representations, to
    support wildcards in ARNs and permissions
    """
    # logger.debug(f"Parsing {item} as string: {string}")
    if wildcard:
        regex = _regex_parse_string(string, case_sensitive=case_sensitive)
    else:
        if case_sensitive:
            regex = z3.Re(string)
        else:
            regex = z3.Re(string.lower())
    return z3.InRe(item, regex)


def _regex_parse_string(string, case_sensitive=False):
    def intersperse(parts, char):
        output = [char] * (len(parts) * 2 - 1)
        output[0::2] = parts
        return output

    star_parts = intersperse(string.split("*"), "*")

    parts = []
    for part in star_parts:
        if "?" in part:
            question_split = intersperse(part.split("?"), "?")
            parts.extend(question_split)
        else:
            parts.append(part)

    regex_parts = []
    for part in parts:
        if part == "*":
            regex_parts.append(z3.Star(ANY))
        elif part == "?":
            regex_parts.append(ANY)
        else:
            if part == "" or case_sensitive:
                chars = z3.Re(part)
            else:
                chars = z3.Re(part.lower())
            regex_parts.append(chars)

    if len(regex_parts) == 1:
        regex = z3.simplify(regex_parts[0])
    else:
        regex = z3.simplify(z3.Concat(*regex_parts))
    return regex


def _arn(data):
    parts = data.split(":", 5)
    return [_regex_string(x) for x in parts]


def _bool(data):
    if "true" == str(data.lower()):
        return z3.BoolVal(True)
    elif "false" == str(data.lower()):
        return z3.BoolVal(False)
    else:
        raise TypeError(f"Invalid Bool: {data}")


def _date(data):
    return int(parse(data).timestamp())


def _numeric(data):
    return z3.IntVal(data)


def _string(data):
    return z3.StringVal(data)


def _regex_string(data, case_sensitive=False):
    return _regex_parse_string(data, case_sensitive=case_sensitive)


def _ip(data):
    ip = ipaddress.ip_network(data, strict=False)
    return z3.BitVecVal(int(ip[0]), ip.max_prefixlen), z3.BitVecVal(int(ip.netmask), ip.max_prefixlen)


_converters = {
    "Arn": _arn,
    "Bool": _bool,
    "Date": _date,
    "Numeric": _numeric,
    "String": _string,
    "RegexString": _regex_string,
    "IpNetwork": _ip,
}
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/datatypes.py
datatypes.py
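A quick check of the wildcard handling in parse_string above, as a sketch assuming z3-solver: '*' expands to z3.Star(ANY), '?' to a single ANY character, and literal parts are lowercased unless case_sensitive is set:

import z3

a = z3.String("a")
solver = z3.Solver()
solver.add(parse_string(a, "s3:Get*"))         # wildcard pattern from a policy
solver.add(a == z3.StringVal("s3:getobject"))  # candidate action (lowercased form)
print(solver.check())  # sat - "s3:getobject" is matched by "s3:Get*"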
from typing import Dict, List, Optional, Set import logging import json import os import z3 import hashlib from zeuscloud_iamspy.iam import AuthorizationDetails, ResourcePolicy from zeuscloud_iamspy import parse from zeuscloud_iamspy.datatypes import parse_string from zeuscloud_iamspy.utils import get_conditions, get_vars logger = logging.getLogger("iamspy.model") class Model: def __init__(self): self.solver = z3.Solver() self._model_vars = None def __enter__(self): new_solver = z3.Solver() new_solver.add(*list(self.solver.assertions())) return new_solver def __exit__(self, exc_type, exc_value, exc_traceback): pass def save(self, filename: str): """ Save a generated Z3 model to a file """ output = self.solver.to_smt2() with open(filename, "w") as fs: fs.write(output) def load_model(self, filename: str): """ Load an existing Z3 model from a file. """ self.solver.from_file(filename) self._model_vars = None def load_gaad(self, filename: str) -> AuthorizationDetails: """ Load the output of `aws iam get-account-authorization-details` Returns a python object representation of the JSON doc, after adding the model to the Z3 solver. """ return self.load_gaad_json(json.load(open(filename))) def load_gaad_json(self, gaad_json: Dict) -> AuthorizationDetails: auth_details = AuthorizationDetails(**gaad_json) conditions = parse.generate_model(auth_details) self.solver.add(*conditions) self._model_vars = None return auth_details def load_resource_policies(self, filename: str) -> None: """ Load resource policies in from a JSON file """ return self.load_resource_policies_json(json.load(open(filename))) def load_resource_policies_json(self, resource_policies_json: List) -> AuthorizationDetails: policies = [ResourcePolicy(**item) for item in resource_policies_json] for policy in policies: self.solver.add(*parse.parse_resource_policy(policy.Resource, policy.Policy, policy.Account)) self._model_vars = None @property def model_vars(self): # Try loading from file if self._model_vars is None: try: with open("model.vars") as fs: data = fs.read() h, v = data.split("\n", 1) if h == self.hash: logger.info("Loading model vars from model.vars") self._model_vars = set(v.split("\n")) except FileNotFoundError: pass # Re-generate the model vars if self._model_vars is None: self._model_vars = get_vars(list(self.solver.assertions())) with open("model.vars", "w") as fs: logger.info("Saving model vars to model.vars") fs.write(self.hash + "\n") fs.write("\n".join(list(self._model_vars))) return self._model_vars @property def hash(self): return hashlib.md5(self.solver.to_smt2().encode()).hexdigest() def generate_evaluation_logic_checks(self, source: Optional[str], resource: str): """ Generate the assertions for the model """ return parse.generate_evaluation_logic_checks(self.model_vars, source, resource) def _generate_query_conditions( self, source: Optional[str], action: str, resource: str, conditions: Optional[List[str]] = None, condition_file: Optional[str] = None, strict_conditions: bool = False, model_conditions: Set[str] = set(), ): if conditions is None: conditions = [] output = self.generate_evaluation_logic_checks(source, resource) s, a, r = z3.Strings("s a r") if source is not None: logger.debug(f"Adding constraint source is {source}") output.append(parse_string(s, source, wildcard=False)) logger.debug(f"Adding constraint action is {action}") logger.debug(f"Adding constraint resource is {resource}") output.append(parse_string(a, action, wildcard=False)) output.append(parse_string(r, resource, wildcard=False)) 
provided_conditions = set() for condition in conditions: key, value = condition.split("=") logger.debug(f"Adding constraint to set {key} condition as {value}") provided_conditions.add(key) output.append(z3.String(f"condition_{key}") == z3.StringVal(value)) if condition_file: logger.debug(f"Parsing {condition_file}") condition_file_data = json.load(open(condition_file)) output.append(parse._parse_condition(condition_file_data)) for test, variables in condition_file_data.items(): for key, value in variables.items(): provided_conditions.add(key) if strict_conditions: logger.debug(f"Non existent conditions from request are: {model_conditions - provided_conditions}") for condition in model_conditions - provided_conditions: output.append(z3.Bool(f"condition_{condition}_exists") == False) for condition in provided_conditions: output.append(z3.Bool(f"condition_{condition}_exists")) return output def can_i( self, source: str, action: str, resource: str, conditions: List[str] = [], condition_file: Optional[str] = None, strict_conditions: bool = False, debug: bool = False, ) -> bool: """ Used by the CLI to provide the can-i call. """ with self as solver: logger.debug("Identifying model conditions") model_conditions = get_conditions(self.model_vars) logger.debug(f"Model conditions identified as: {model_conditions}") query_conditions = self._generate_query_conditions( source=source, action=action, resource=resource, conditions=conditions, condition_file=condition_file, strict_conditions=strict_conditions, model_conditions=model_conditions, ) solver.add(*query_conditions) if debug: return solver else: return solver.check() == z3.sat def who_can( self, action: str, resource: str, conditions: List[str] = [], condition_file: Optional[str] = None, strict_conditions: bool = False, ) -> List[str]: """ Used by the CLI to provide the who-can call. """ with self as solver: logger.debug("Identifying model conditions") model_conditions = get_conditions(self.model_vars) logger.debug(f"Model conditions identified as: {model_conditions}") query_conditions = self._generate_query_conditions( source=None, action=action, resource=resource, conditions=conditions, condition_file=condition_file, strict_conditions=strict_conditions, model_conditions=model_conditions, ) solver.add(*query_conditions) sat = solver.check() == z3.sat sources = [] while sat: s = z3.String("s") m = solver.model() source = m[s] sources.append(str(source)[1:-1]) solver.add(s != source) sat = solver.check() == z3.sat return sources
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/model.py
model.py
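End-to-end usage of the Model class above, as a sketch; "gaad.json" stands in for a local dump of `aws iam get-account-authorization-details`, and the ARNs are placeholders:

m = Model()
m.load_gaad("gaad.json")
m.save("model.smt2")  # cache the solver state for later runs

print(m.can_i(
    source="arn:aws:iam::111111111111:user/alice",
    action="s3:GetObject",
    resource="arn:aws:s3:::example-bucket/secret.txt",
))  # True when the solver finds a satisfying assignment, i.e. access is possible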
import logging import z3 from zeuscloud_iamspy.datatypes import convert logger = logging.getLogger("iamspy.conditions") """ IAM Condition handling functions are defined here IAM condition operators: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html Conditions with multiple keys or values: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_multi-value-conditions.html TODO: Multi key/value sets TODO: Policy Variables: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html """ def if_exists(func): def wrapper(key, value, if_exists=False): condition = func(key, value) if not if_exists: condition = z3.And(z3.Bool(f"{key}_exists"), condition) return condition return wrapper @if_exists def string_equals(key, values): """ StringEquals condition key Exact matching, case sensitive """ logger.debug(f"string_equals condition: {key}, {values}") return z3.Or(*[z3.String(key) == convert("String", x) for x in values]) @if_exists def string_not_equals(key, values): """ StringNotEquals condition key Negated matching """ logger.debug(f"string_not_equals condition: {key}, {values}") return z3.Not(string_equals(key, values)) @if_exists def string_equals_ignore_case(key, values): """ StringEqualsIgnoreCase condition key Exact matching, ignoring case """ logger.debug(f"string_equals_ignore_case condition: {key}, {values}") return string_equals(key.lower(), [x.lower() for x in values]) @if_exists def string_not_equals_ignore_case(key, values): """ StringNotEqualsIgnoreCase condition key Negated matching, ignoring case """ logger.debug(f"string_not_equals_ignore_case condition: {key}, {values}") return z3.Not(string_equals_ignore_case(key, values)) @if_exists def string_like(key, values): """ StringLike condition key Case-sensitive matching. The values can include multi-character match wildcards (*) and single-character match wildcards (?) anywhere in the string. Note If a key contains multiple values, StringLike can be qualified with set operators—ForAllValues:StringLike and ForAnyValue:StringLike. """ logger.debug(f"string_like condition: {key}, {values}") return z3.Or(*[z3.InRe(z3.String(key), convert("RegexString", x, case_sensitive=True)) for x in values]) @if_exists def string_not_like(key, values): """ StringNotLike condition key Negated case-sensitive matching. The values can include multi-character match wildcards (*) or single-character match wildcards (?) anywhere in the string. 
""" logger.debug(f"string_not_like condition: {key}, {values}") return z3.Not(string_like(key, values)) @if_exists def numeric_equals(key, values): """ NumericEquals condition key True if input number matches value specified in policy """ logger.debug(f"numeric_equals condition: {key}, {values}") return z3.Or(*[z3.Int(key) == convert("Numeric", x) for x in values]) @if_exists def numeric_not_equals(key, values): """ NumericNotEquals condition key True if input number does not match value specified in policy """ logger.debug(f"numeric_not_equals condition: {key}, {values}") return z3.Not(numeric_equals(key, values)) @if_exists def numeric_less_than(key, values): """ NumericLessThan condition key True if input less than value """ logger.debug(f"numeric_less_than condition: {key}, {values}") return z3.Or(*[z3.Int(key) < convert("Numeric", x) for x in values]) @if_exists def numeric_less_than_equals(key, values): """ NumericLessThanEquals condition key True if input less than or equal to value """ logger.debug(f"numeric_less_than_equals condition: {key}, {values}") return z3.Or(*[z3.Int(key) <= convert("Numeric", x) for x in values]) @if_exists def numeric_greater_than(key, values): """ NumericGreaterThan condition key True if input greater than value """ logger.debug(f"numeric_greater_than condition: {key}, {values}") return z3.Or(*[z3.Int(key) > convert("Numeric", x) for x in values]) @if_exists def numeric_greater_than_equals(key, values): """ NumericGreaterThanEquals condition key True if input greater than or equal to value """ logger.debug(f"numeric_greater_than_equals condition: {key}, {values}") return z3.Or(*[z3.Int(key) >= convert("Numeric", x) for x in values]) @if_exists def date_equals(key, values): """ DateEquals Matching a specific date """ logger.debug(f"date_equals condition: {key}, {values}") return numeric_equals(key, [convert("Date", x) for x in values]) @if_exists def date_not_equals(key, values): """ DateNotEquals Negated matching """ logger.debug(f"date_not_equals condition: {key}, {values}") return z3.Not(date_equals(key, values)) @if_exists def date_less_than(key, values): """ DateLessThan Matching before a specific date and time """ logger.debug(f"date_less_than condition: {key}, {values}") return numeric_less_than(key, [convert("Date", x) for x in values]) @if_exists def date_less_than_equals(key, values): """ DateLessThanEquals Matching at or before a specific date and time """ logger.debug(f"date_less_than_equals condition: {key}, {values}") return numeric_less_than_equals(key, [convert("Date", x) for x in values]) @if_exists def date_greater_than(key, values): """ DateGreaterThan Matching after a specific a date and time """ logger.debug(f"date_greater_than condition: {key}, {values}") return numeric_greater_than(key, [convert("Date", x) for x in values]) @if_exists def date_greater_than_equals(key, values): """ DateGreaterThanEquals Matching at or after a specific date and time """ logger.debug(f"date_greater_than_equals condition: {key}, {values}") return numeric_greater_than_equals(key, [convert("Date", x) for x in values]) @if_exists def bool_match(key, values): """ Bool Boolean matching """ logger.debug(f"bool condition: {key}, {values}") return z3.Or(*[z3.Bool(key) == convert("Bool", x) for x in values]) @if_exists def binary_equals(key, values): """ BinaryEquals The BinaryEquals condition operator let you construct Condition elements that test key values that are in binary format. 
It compares the value of the specified key byte for byte against a base-64 encoded representation of the binary value in the policy. """ logger.debug(f"binary_equals condition: {key}, {values}") # As these are base64 encoded strings, for now we are just passing these as string # comparisons on the encoded data. if_exists is set to True to bypass the additional # if_exists=False logic from the string function that would be done by the binary equals # if_exists decorator return string_equals(key, values, if_exists=True) @if_exists def ip_address(key, values): """ IpAddress The specified IP address or range """ logger.debug(f"ip_address condition: {key}, {values}") parts = [] for x in values: base_ip, netmask = convert("IpNetwork", x) parts.append((z3.BitVec(key, netmask.size()) & netmask) == base_ip) return z3.Or(*parts) @if_exists def not_ip_address(key, values): """ NotIpAddress All IP addresses except the specified IP address or range """ logger.debug(f"not_ip_address condition: {key}, {values}") return z3.Not(ip_address(key, values)) @if_exists def arn_equals(key, values): """ ArnEquals Case-sensitive matching of the ARN. Each of the six colon-delimited components of the ARN is checked separately and each can include multi-character match wildcards (*) or single-character match wildcards (?). The ArnEquals and ArnLike condition operators behave identically. """ logger.debug(f"arn_equals condition: {key}, {values}") suffixes = ["arn", "partition", "service", "region", "account", "resource"] parts = [] for x in values: parts.append( z3.And( *[z3.InRe(z3.String(f"{key}_{suffix}"), regex) for suffix, regex in zip(suffixes, convert("Arn", x))] ) ) return z3.Or(*parts) @if_exists def arn_like(key, values): """ ArnLike Case-sensitive matching of the ARN. Each of the six colon-delimited components of the ARN is checked separately and each can include multi-character match wildcards (*) or single-character match wildcards (?). The ArnEquals and ArnLike condition operators behave identically. """ logger.debug(f"arn_like condition: {key}, {values}") return arn_equals(key, values) @if_exists def arn_not_equals(key, values): """ ArnNotEquals Negated matching for ARN. The ArnNotEquals and ArnNotLike condition operators behave identically. """ logger.debug(f"arn_not_equals condition: {key}, {values}") return z3.Not(arn_equals(key, values)) @if_exists def arn_not_like(key, values): """ ArnNotLike Negated matching for ARN. The ArnNotEquals and ArnNotLike condition operators behave identically. 
""" logger.debug(f"arn_not_like condition: {key}, {values}") return arn_not_equals(key, values) @if_exists def null(key, values): """ Null Checks existence of condition keys """ logger.debug(f"null condition: {key}, {values}") values = [False if x == "true" else True for x in values] return z3.Or(*[z3.Bool(f"{key}_exists") == x for x in values]) condition_functions = { # String condition operators "StringEquals": string_equals, "StringNotEquals": string_not_equals, "StringEqualsIgnoreCase": string_equals_ignore_case, "StringNotEqualsIgnoreCase": string_not_equals_ignore_case, "StringLike": string_like, "StringNotLike": string_not_like, # Numeric condition operators "NumericEquals": numeric_equals, "NumericNotEquals": numeric_not_equals, "NumericLessThan": numeric_less_than, "NumericLessThanEquals": numeric_less_than_equals, "NumericGreaterThan": numeric_greater_than, "NumericGreaterThanEquals": numeric_greater_than_equals, # Date condition operators "DateEquals": date_equals, "DateNotEquals": date_not_equals, "DateLessThan": date_less_than, "DateLessThanEquals": date_less_than_equals, "DateGreaterThan": date_greater_than, "DateGreaterThanEquals": date_greater_than_equals, # Bool condition operators "Bool": bool_match, # Binary condition operators "BinaryEquals": binary_equals, # IP address condition operators "IpAddress": ip_address, "NotIpAddress": not_ip_address, # ARN condition operators "ArnEquals": arn_equals, "ArnLike": arn_like, "ArnNotEquals": arn_not_equals, "ArnNotLike": arn_not_like, # Null condition operator "Null": null, }
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/conditions.py
conditions.py
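A small demonstration of the operator table above (a sketch, assuming z3-solver): each entry returns a z3 constraint over a condition key, and the if_exists decorator additionally asserts the key's _exists flag unless if_exists is requested:

import z3

constraint = condition_functions["StringEquals"]("aws:PrincipalTag/team", ["red", "blue"])
solver = z3.Solver()
solver.add(constraint)
solver.add(z3.String("aws:PrincipalTag/team") == z3.StringVal("red"))
print(solver.check())  # sat - the key equals an allowed value (its _exists Bool is forced True)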
import logging
from typing import List, Optional
import typer
from zeuscloud_iamspy.model import Model
from pathlib import Path
from zeuscloud_iamspy.log_config import build_logger

app = typer.Typer()


@app.command()
def load_gaad(gaad: str = typer.Argument(...), model: str = typer.Option("model.smt2", "-f")):
    m = Model()
    if Path(model).is_file():
        m.load_model(model)
    m.load_gaad(gaad)
    m.save(model)


@app.command()
def load_resources(resources: str = typer.Argument(...), model: str = typer.Option("model.smt2", "-f")):
    m = Model()
    if Path(model).is_file():
        m.load_model(model)
    m.load_resource_policies(resources)
    m.save(model)


@app.command()
def can_i(
    source_arn: str = typer.Argument(...),
    action: str = typer.Argument(...),
    resource: str = typer.Argument(...),
    conditions: List[str] = typer.Option([], "-c", help="List of conditions as key=value string pairs"),
    condition_file: Optional[str] = typer.Option(
        None, "-C", help="File of conditions to load following IAM condition syntax"
    ),
    strict_conditions: bool = typer.Option(
        False, help="Whether to require conditions to be passed in for any IAM condition checks"
    ),
    model: str = typer.Option("model.smt2", "-f"),
):
    """
    Pulls out applicable policies, runs can_i
    """
    m = Model()
    if Path(model).is_file():
        m.load_model(model)
    print(m.can_i(source_arn, action, resource, conditions, condition_file, strict_conditions))


@app.command()
def who_can(
    action: str = typer.Argument(...),
    resource: str = typer.Argument(...),
    conditions: List[str] = typer.Option([], "-c", help="List of conditions as key=value string pairs"),
    condition_file: Optional[str] = typer.Option(
        None, "-C", help="File of conditions to load following IAM condition syntax"
    ),
    strict_conditions: bool = typer.Option(
        False, help="Whether to require conditions to be passed in for any IAM condition checks"
    ),
    model: str = typer.Option("model.smt2", "-f"),
):
    """
    Pulls out applicable policies, runs who_can
    """
    m = Model()
    if Path(model).is_file():
        m.load_model(model)
    print("\n".join(m.who_can(action, resource, conditions, condition_file, strict_conditions)))


@app.callback()
def main(verbose: int = typer.Option(0, "--verbose", "-v", count=True)):
    """
    CLI interface for iamspy, the AWS IAM analysis framework
    """
    verbosity_levels = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    # Clamp at DEBUG so e.g. -vvvv does not raise a KeyError
    build_logger(verbosity_levels.get(verbose, logging.DEBUG))
zeuscloud-iamspy
/zeuscloud_iamspy-0.8.0.tar.gz/zeuscloud_iamspy-0.8.0/zeuscloud_iamspy/cli.py
cli.py
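The CLI above can also be driven programmatically, for example in tests; a sketch using typer's test runner, with a placeholder model file and ARNs (typer exposes can_i as the "can-i" command):

from typer.testing import CliRunner

runner = CliRunner()
result = runner.invoke(app, [
    "can-i",
    "arn:aws:iam::111111111111:user/alice",
    "s3:GetObject",
    "arn:aws:s3:::example-bucket/*",
    "-f", "model.smt2",
])
print(result.output)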
import time
import threading
from . import zeus_rblib
from .zeus_error import *

# for module check
# import zeus_rblib
# from zeus_error import *


class _ZeusIO(object):
    """
    ZeusIO class
    Controls digital IO.

    Methods:
    [Public]
    open(self, *args): Initialize the robot connection (opens a RobotClient)
    close(self): Close the robot connection
    wait(self, adr, data, wait_time): Wait up to wait_time for the IO at the given address to equal data
    dout(self, adr, data, delay=0): Control digital output signals
    din(self, *adr): Read digital input signals
    [Internal]
    _dout(self, adr, data): Digital output method
    _din(self, *adr): Digital input method
    _pars(self, st): Check a string and convert it to one containing only 1, 0, *, n, s, d
    __senddata(self, col, d0int, m0int, d1int, m1int): Send digital output data
    __rcvd(self, col): Receive the 64-bit data at word address col
    __replace(self, st, ch, n): Replace the character n positions from the right
    __bsNot(self, p): Bitwise NOT of a bitstream
    __bsAnd(self, p, q): Bitwise AND of two bitstreams
    __bs2i(self, st): Convert bitstream-formatted str data to int
    __i2bs(self, n): Convert an integer to a 32-character bit stream (1 word)
    """

    def __init__(self):
        self.__flag = 0
        self.__rb = None
        self.open()
        self._lock = threading.Lock()

    def __del__(self):
        if self.__flag == 1:
            if self.__rb is not None:
                self.__rb.close()
            self.__flag = 0

    def open(self, *args):
        """
        Initialize the robot connection.
        Args:
            *args: IP address (str), port number (int)
                Defaults: '192.168.0.23', 12345
        Returns:
            bool : result of the operation
        """
        self.__flag = 0
        try:
            if len(args) == 0:
                self.__rb = zeus_rblib.RobotClient('192.168.0.23', 12345)
                self.__rb.open(3)
                self.__flag = 1
            elif len(args) == 1:
                if isinstance(args[0], zeus_rblib.RobotClient):
                    self.__rb = args[0]
                    self.__flag = 0  # isn't opened by IO
                elif isinstance(args[0], str):
                    self.__rb = zeus_rblib.RobotClient(args[0], 12345)
                    self.__rb.open()
                    self.__flag = 1
                else:
                    raise ZeusIOException(ERR_ZERO_IO, 1, "open")
            elif len(args) == 2:
                if isinstance(args[0], str) and isinstance(args[1], int):
                    self.__rb = zeus_rblib.RobotClient(args[0], args[1])
                    self.__rb.open()
                    self.__flag = 1
                else:
                    raise ZeusIOException(ERR_ZERO_IO, 1, "open")
            else:
                raise ZeusIOException(ERR_ZERO_IO, 1, "open")
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 2, "open", e)
        return True

    def close(self):
        try:
            if self.__rb is not None:
                self.__rb.close()
            self.__flag = 0
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 3, "close", e)
        return True

    def wait(self, adr, data, wait_time):
        """
        Wait up to wait_time seconds until the IO at the given address equals data.
        Args:
            adr(int): start number of the input ports
            data(str): input value, e.g. '1001'
                * '1' = ON
                * '0' = OFF
            wait_time(float): time to wait
        Returns:
            list : [result(bool), value read(str), elapsed time(float)]
        """
        try:
            ts = time.time()
            while True:
                dlen = len(data)
                if dlen == 1:  # '1' or '0'
                    dbit = self.din(adr)
                    getd = dbit
                    dat = self._pars(data)[1]
                else:
                    datar = self._pars(data)
                    mask = datar[2]
                    dat = datar[1]
                    getd = self.din(adr, adr + len(data) - 1)
                    dbit = self.__bsAnd(getd, self.__bsNot(mask))
                if dbit == dat:  # value read == awaited value
                    return [True, getd, time.time() - ts]
                time.sleep(0.05)
                if time.time() - ts >= wait_time:  # waiting time exceeded
                    return [False, getd, time.time() - ts]
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 4, "wait", e)

    def dout(self, adr, data, delay=0):
        """
        Control digital output signals.
        Args:
            adr: start number of the output ports (16-31)
            data: output value, assigned from adr in LSB->MSB order, e.g. '1001'
                * '1' = ON
                * '0' = OFF
            delay: seconds to wait before the output is applied
        Returns:
        Examples:
            1. Without delay: dout(16,'1001')
                The LSB corresponds to the start port number; bits are assigned in LSB->MSB order:
                * 16 : 1
                * 17 : 0
                * 18 : 0
                * 19 : 1
            2. With delay: dout(16, '1001', 1)
                The output is applied 1 second after the call.
                (A thread is spawned and the script proceeds to the next statement.)
        """
        try:
            if delay == 0:
                self._dout(adr, data)
            elif delay > 0:
                self.__t = _DelayOut(self, adr, data, delay)
                self.__t.start()
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 5, "dout", e)

    def din(self, *adr):
        """
        Read digital input signals.
        Args:
            *adr: start number of the input ports, end number of the input ports
        Returns:
        Examples:
            din(1) : read port 1
            din(1,4) : read ports 1 through 4
        """
        try:
            if len(adr) == 1:
                retval = self._din(adr[0])
                return retval
            elif len(adr) == 2:
                retval = self._din(adr[0], adr[1])
                return retval
            else:
                raise ZeusIOException(ERR_ZERO_IO, 1, "din")
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 6, "din", e)

    def _dout(self, adr, data):
        """
        Digital output method.
        Args:
            adr(int): start number of the output ports
            data(str): output value, assigned from adr in LSB->MSB order, e.g. '1001'
                * '1' = ON
                * '0' = OFF
        Returns:
            bool : success or failure
        """
        with self._lock:
            da = data.replace(" ", "")  # strip spaces (whitespace is allowed for input convenience)
            row = adr % 32
            col = adr // 32
            dat = self._pars(da)
            dal = self.__replace("0" * 64, dat[1], row)
            cal = self.__replace("1" * 64, dat[2], row)
            d0 = dal[32:]
            c0 = cal[32:]
            d1 = dal[:32]
            c1 = cal[:32]
            d0int = int(d0, 2)
            c0int = int(c0, 2)
            d1int = int(d1, 2)
            c1int = int(c1, 2)
            res = self.__senddata(col, d0int, c0int, d1int, c1int)
            if res[0] == False:
                raise ZeusIOException(res[1], res[2], "_dout")
            return True

    def _din(self, *adr):
        """
        Digital input method.
        Args:
            *adr: start number of the input ports, end number of the input ports
        Returns:
            string : returned on success; the input data read
            Exception : raised on failure
        """
        with self._lock:
            try:
                col = adr[0] // 32  # compute the word number
                offset = col * 32
                row = 63 - (adr[0] - offset)  # position to read, relative to the LSB
                data = self.__rcvd(col)
            except Exception as e:
                raise ZeusIOException(ERR_ZERO_IO, 6, "_din", e)
            if len(adr) == 1:
                return data[row:row + 1]
            elif len(adr) == 2:
                row1 = 63 - (adr[1] - offset)
                if row1 >= 0:
                    return data[row1:row + 1]
                else:
                    raise ZeusIOException(ERR_ZERO_IO, 1, "_din")
            else:
                raise ZeusIOException(ERR_ZERO_IO, 1, "_din")

    def _pars(self, st):
        """
        Check the string and convert it to one containing only 1, 0, *, n, s, d.
        Args:
            st(str): input string
        Returns:
            list : [result string, data, mask]
        """
        if isinstance(st, str) and len(st) <= 32:  # at most 32 characters
            if st[:2] == "0x" or st[:2] == "0X":  # error if the string starts with 0x or 0X (hexadecimal)
                raise ZeusIOException(ERR_ZERO_IO, 1, "_pars")
            elif st[:2] == "0b":  # error if the string starts with 0b (binary)
                raise ZeusIOException(ERR_ZERO_IO, 1, "_pars")
            else:
                rslt = st
                dat = ""
                mask = ""
                for i in range(len(st) - 1, -1, -1):
                    if (st[i] != "0" and st[i] != "1" and st[i] != "*" and
                            st[i] != "n" and st[i] != "s" and st[i] != "d"):
                        rslt = self.__replace(rslt, "*", len(st) - i - 1)
                    if rslt[i] == "1":
                        dat = "1" + dat
                    else:
                        dat = "0" + dat
                    if rslt[i] == "*" or rslt[i] == "n" or rslt[i] == "s" or rslt[i] == "d":
                        mask = "1" + mask
                    else:
                        mask = "0" + mask
                return [rslt, dat, mask]
        else:
            raise ZeusIOException(ERR_ZERO_IO, 1, "_pars")

    def __senddata(self, col, d0int, m0int, d1int, m1int):
        """
        Send digital output data.
        Args:
            col:
            d0int: 32-bit dataL
            m0int: 32-bit maskL
            d1int: 32-bit dataH
            m1int: 32-bit maskH
        Returns:
            result of ioctrl
        """
        res = self.__rb.ioctrl(col, d0int, m0int, d1int, m1int)
        return res

    def __rcvd(self, col):
        """
        Receive the 64-bit data at word address col.
        Args:
            col(int): start address
        Returns:
            str : 64-character bitstream data
        """
        dummy = 2 ** 32 - 1
        res = self.__rb.ioctrl(col, dummy, dummy, dummy, dummy)
        if res[0] == False:
            raise ZeusIOException(res[1], res[2], "__rcvd")
        return self.__i2bs(res[2]) + self.__i2bs(res[1])

    def __replace(self, st, ch, n):
        """
        Replace the character(s) n positions from the right.
        Args:
            st(str): original string
            ch(str): replacement string
            n(int): bit position to replace
        Returns:
            str : modified string
        """
        if len(st) < n:
            stret = st
        else:
            num = len(st) - n
            num0 = len(st) - n - len(ch)
            stret = st[:(num0)] + ch + st[num:]
            stret = stret[-1 * len(st):]
        return stret

    def __bsNot(self, p):
        """
        Bitwise NOT of a bitstream.
        Args:
            p(str): bitstream-formatted str data
        Returns:
            str : bitstream-formatted str data
        """
        return bin(self.__bs2i(p) ^ 2 ** 32 - 1)[-len(p):]

    def __bsAnd(self, p, q):
        """
        Bitwise AND of two bitstreams.
        Args:
            p(str): bitstream-formatted str data
            q(str): bitstream-formatted str data
        Returns:
            str : bitstream-formatted str data
        """
        if len(p) > len(q):
            l = q
        else:
            l = p
        return bin(self.__bs2i("1" + p) & self.__bs2i("1" + q))[-len(l):]

    def __bs2i(self, st):
        """
        Convert bitstream-formatted str data to int.
        Args:
            st: bitstream-formatted str data
        Returns:
            int : data converted to int
        """
        return int(st, 2)

    def __i2bs(self, n):
        """
        Convert an integer to a 32-character bit stream (1 word).
        Args:
            n: decimal integer (int)
        Returns:
            str : bitstream-formatted data
        """
        p = format(n, "032b")
        return p[-(len(p)):]


class _DelayOut(threading.Thread):
    """
    Thread class implementing the delayed dout feature
    """
    def __init__(self, IO, adr, data, delay):
        threading.Thread.__init__(self)
        self.io = IO
        self.adr = adr
        self.data = data
        self.delay = delay
        self.lock_obj = threading.Lock()

    def run(self):
        """
        Thread body: wait for delay seconds, call the dout method, then terminate.
        Returns:
        """
        try:
            self.lock_obj.acquire()
            time.sleep(self.delay)
            self.io.dout(self.adr, self.data)
            time.sleep(1)
            self.lock_obj.release()
        except Exception as e:
            raise ZeusIOException(ERR_ZERO_IO, 7, "DelayOut.run", e)


if __name__ == "__main__":
    io = _ZeusIO()
    io.open()
    io.dout(16, '10101010')
    time.sleep(1)
    io.dout(16, '01010101')
zeusrobot
/_zeusrobot/zeus_io.py
zeus_io.py
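A usage sketch for the wait/dout pattern above; it assumes a reachable controller at the class defaults ('192.168.0.23', 12345), and the port numbers here are placeholders:

io = _ZeusIO()
# Wait up to 5 s for input ports 1-4 to show the pattern '1001'
# (rightmost bit = port 1; '*' positions would be masked out of the comparison)
ok, value, elapsed = io.wait(1, "1001", 5.0)
if ok:
    io.dout(16, "1111")     # switch outputs 16-19 on immediately
    io.dout(16, "0000", 2)  # and back off 2 seconds later via the delay thread
io.close()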
ERR_ZERO_IO = 20
ERR_ZERO_SHM = 21
ERR_ZERO_POSITION = 22
ERR_ZERO_JOINT = 23
ERR_ZERO_COORDINATE = 24
ERR_ZERO_MOTIONPARAM = 25
ERR_ZERO_ROBOT = 26
ERR_ZERO_ROBOT_CRITICAL = 27


# Base Class
class ZeusException(Exception):
    """
    Zeus Exception class
    Examples :
        try:
            ...
        except Exception as e:
            ZeusException(6,1,move,e)
            ZeusException(method=move, exc_msg=e)
    """
    type = 0

    def __init__(self, type=0, code=0, method="{method_name}", exc_msg=None):
        self.class_name = self.__class__.__name__  # for testing
        self.type = type
        self.code = code
        self.method = method
        self.exc_msg = exc_msg
        if (self.type, self.code) == (0, 0):
            super().__init__(f'\n{self.class_name}.{self.method} : {str(self.exc_msg)}')
        elif self.exc_msg:
            super().__init__(
                f'\n{self._errtype()}{str(self.type).zfill(2)}{str(self.code).zfill(2)} : '
                f'{self.class_name}.{self.method} : {self._errmsg()} : {str(self.exc_msg)}'
            )
        else:
            super().__init__(
                f'\n{self._errtype()}{str(self.type).zfill(2)}{str(self.code).zfill(2)} : '
                f'{self.class_name}.{self.method} : {self._errmsg()}'
            )

    def _errtype(self):
        if (self.type, self.code) in _errcode:
            return "A"
        elif (self.type, self.code) in _sys_errcode:
            if self.type == 10:
                return "E"
            elif self.type == 11:
                return "C"
        elif (self.type, self.code) in _app_errcode:
            return "A"

    def _errmsg(self):
        if (self.type, self.code) in _errcode:
            return _errcode[(self.type, self.code)]
        elif (self.type, self.code) in _sys_errcode:
            return _sys_errcode[(self.type, self.code)]
        elif (self.type, self.code) in _app_errcode:
            return _app_errcode[(self.type, self.code)]


class ZeusRobotCriticalException(ZeusException):
    class_name = '_ZeusRobotCritical'
    type = ERR_ZERO_ROBOT_CRITICAL


class ZeusRobotException(ZeusException):
    class_name = '_ZeusRobot'
    type = ERR_ZERO_ROBOT


class ZeusIOException(ZeusException):
    class_name = '_ZeusIO'
    type = ERR_ZERO_IO


class ZeusSharedMemoryException(ZeusException):
    class_name = '_ZeusSharedMemory'
    type = ERR_ZERO_SHM


class ZeusPositionException(ZeusException):
    class_name = '_ZeusPosition'
    type = ERR_ZERO_POSITION


class ZeusJointException(ZeusException):
    class_name = '_ZeusJoint'
    type = ERR_ZERO_JOINT


class ZeusCoordinateException(ZeusException):
    class_name = '_ZeusCoordinate'
    type = ERR_ZERO_COORDINATE


class ZeusMotionParamException(ZeusException):
    class_name = '_ZeusMotionParam'
    type = ERR_ZERO_MOTIONPARAM


# (1,x), (2,x) : Python Level Error
# (3,x) : Robot Control System Error
# (4,x) : Robot Control System Error
# (5,x) : Robot Control System Error
# (20~,x) : API Level Error
# (10,x) : System Normal Error (E00~E99)
# (11,x) : System Critical Error (C00~C99)
_errcode = {
    (1, 1): "Unable to allocate memory.", (1, 2): "Invalid descriptor",
    (2, 1): "Unable to connect to external device.", (2, 2): "Invalid command (CMD) key",
    (2, 3): "Invalid code data type", (2, 4): "Program is out of range.",
    (2, 5): "Invalid parameter key", (2, 6): "Invalid parameter data type",
    (2, 7): "Invalid data format", (2, 8): "Invalid parameter array type",
    (3, 0): "No error", (3, 1): "Robot operation not permitted.",
    (3, 2): "Parameter out of range.", (3, 3): "Invalid parameters",
    (3, 4): "Unable to change parameters.", (3, 10): "Robot motion planning error",
    (3, 11): "Position is outside of robot joint's moving ranges.",
    (3, 12): "Unreachable point error",
    (3, 13): "Exceeded robot joint's moving ranges during operations.",
    (3, 14): "Exceeded robot joint's maximum speed.",
    (3, 15): "Emergency Stop", (3, 16): "Paused",
    (3, 17): "Command (CMD) error occurred during operation.",
    (3, 18): "unable to obtain authorization for robot operation.",
    (3, 19): "CommandList buffer overflow", (3, 20): "Unable to save parameter.",
    (3, 21): "Unable to stop within specified time.",
    (3, 22): "Unable to calculate pulse offset",
    (3, 23): "Number of motion CMD overflow",
    (3, 24): "No force sensor", (3, 25): "No response from force sensor",
    (3, 26): "Frame error 1 with force sensor", (3, 27): "Frame error 2 with force sensor",
    (3, 28): "Invalid data from force sensor", (3, 29): "Unknown status of force sensor",
    (3, 30): "Invalid gain of force sensor", (3, 31): "Joint buffer is full",
    (3, 32): "Unable to access shared memory", (3, 33): "Unable to map shared memory",
    (3, 34): "(Reserved)", (3, 35): "User home position is not defined.",
    (3, 36): "(Reserved)", (3, 37): "Motion command exceeded speed limit.",
    (3, 99): "Robot control system error",
    (4, 1): "Invalid data type", (4, 2): "Parameter out of range",
    (4, 3): "Invalid parameter key", (4, 4): "Invalid parent object",
    (4, 5): "Exceeded the operable range.",
    (4, 6): "Communication error with the robot manipulator.",
    (4, 7): "Unable to initialize the robot.", (4, 8): "File not found.",
    (4, 9): "Unable to execute a robot program in Teaching mode.",
    (4, 10): "Robot operation not permitted.", (4, 11): "Index out of range",
    (4, 12): "Key not found.", (4, 13): "Unable to access shared memory",
    (4, 14): "Too many arguments", (4, 15): "Data length is too long",
    (4, 16): "Invalid points at Area_Box, same position.",
    (4, 17): "(ForceSensor) Unable to set zeus offset",
    (4, 18): "(ForceSensor) Unable to read data", (4, 19): "Communication error",
    (4, 20): "Invalid Points at Area_Box, determinant equals 0",
    (4, 21): "Unable to start user program in error state",
    (4, 22): "Unable to start user program in invalid mode",
    (4, 23): "Unable to register to System manager",
    (4, 24): "sleep api must be called in main thread",
    (4, 25): "hook api must be called in main thread",
    (4, 26): "Teachdata is already opened.",
    (4, 27): "Unable to open during Teach mode.",
    (4, 28): "teach_data is too old! Please convert it.",
    (4, 29): "unsupported teachdata version!",
    (4, 30): "teach_data is not opened.",
    (4, 31): "teach_data is not opened with R/W.",
    (4, 32): "Invalid file name.",
    (4, 33): "teach_data is already opened by another process.",
    (4, 34): "Upgrade is not needed.", (4, 35): "Invalid teach_data format",
    (4, 36): "Unable to open file", (4, 37): "Unable to R/W open file",
    (4, 38): "Unable to use RobSys class with i611Robot class",
    (4, 98): "Unprocessed error", (4, 99): "Unprocessed error",
    (5, 1): "Emergency stop", (5, 2): "Stopped by the terminate instruction.",
    (5, 3): "Error in the robot control program.", (5, 4): "Error in the system program.",
    (5, 5): "ABS lost", (5, 6): "Timeout in emergency stop process",
    (5, 7): "Stopped by force sensor detection.", (5, 99): "Unprocessed error",
}

_sys_errcode = {
    (10, 1): '"init.py" not found.', (10, 2): 'Error in "init.py"',
    (10, 3): "Unable to execute the robot program.",
    (10, 4): "No robot program was set.",
    (10, 5): "Robot program was in an unexecutable mode.",
    (10, 6): "Robot motion API was called before the i611Robot class's open() was called.",
    (10, 7): "Robot program was executed while the ABS lost.",
    (10, 8): "Robot program terminated abnormally.",
    (10, 9): "i611Robot class's open() was called while emergency stop.",
    (10, 10): "i611Robot class's open() was called while servo off.",
    (10, 11): "unable to obtain authorization for robot operation.",
    (10, 12): "Robot program failed to communicate with the system manager.",
    (10, 13): "Exception of emergency stop was not caught.",
    (10, 14): "Robot program's exit() terminated abnormally.",
    (10, 15): "Robot program terminated with an exception.",
    (10, 16): "Exception of deceleration stop was not caught.",
    (10, 17): "System's terminating process was not completed properly.",
    (10, 18): "Unable to access the memory I/O.",
    (10, 19): "i611Robot class's instance was made multiple times in a process.",
    (10, 20): "i611Robot class's open() was called multiple times in a process.",
    (10, 21): "Invalid call of API from another thread occurred.",
    (10, 22): "Invalid call of API while emergency stop.",
    (10, 23): "Invalid call of API while servo off.",
    (10, 24): "PC's Shared Memory was not updated. Please execute ZSP Program and open ZSP Server",
    (10, 40): "Teaching Program terminated abnormally.",
    (10, 50): "Force sensor API was called before the force sensor's open() was called.",
    (10, 51): "Exception of the force sensor event was not caught.",
    (10, 52): "Unable to access the force sensor's device.",
    (10, 53): "Used size under /home/i611usr exceeds limit",
    (10, 99): "Other error",
    (11, 1): "Unable to start the system manager.",
    (11, 2): "System manager terminated abnormally.",
    (11, 3): "System manager failed to communicate with the control.",
    (11, 4): "Error occurred during Jog mode.",
    (11, 5): "Control manager terminated abnormally.",
    (11, 6): "Not enough free space on storage.",
    (11, 10): "(Joint) Circuit error", (11, 11): "(Joint) Current over",
    (11, 12): "(Joint) Brake error", (11, 13): "(Joint) Torque over",
    (11, 14): "(Joint) Overload error for motor",
    (11, 15): "(Joint) Driving voltage decreased",
    (11, 16): "(Joint) Power unit error", (11, 17): "(Joint) Servo motor error",
    (11, 18): "(Joint) Error occurred at servo motor On check 1",
    (11, 19): "(Joint) Error occurred at servo motor On check 2",
    (11, 20): "(Joint) ABS lost: Over speed error during power shut-off",
    (11, 21): "(Joint) ABS lost: ABS's home position was not saved",
    (11, 22): "(Joint) ABS lost: Encoder detection error",
    (11, 23): "(Joint) ABS lost: Encoder detector error",
    (11, 24): "(Joint) ABS lost: ABS lost occurred due to human factors.",
    (11, 25): "(Joint) Cannot change state",
    (11, 26): "Error occurred at the top I/O.",
    (11, 27): "Error occurred at the force sensor.",
    (11, 28): "Error occurred in the internal monitor process.",
    (11, 29): "Cooling fan stopped.",
    (11, 30): "Regenerative resistor error 1 occurred.",
    (11, 31): "Main circuit relay breakdown.",
    (11, 32): 'Wiring error in "emergency stop circuit"',
    (11, 33): 'Wiring error in "mode circuit"',
    (11, 34): "Control power supply error",
    (11, 35): "The rush prevent resistor occurred a heat error",
    (11, 36): "Regenerative resistor error 2",
    (11, 37): "Regenerative resistor error 3",
    (11, 38): "Safety circuit error",
    (11, 39): "Ethercat communication lost",
    (11, 40): 'Duplication signal inconsistency in the "door circuit."',
    (11, 41): 'Duplication signal inconsistency in the "mode circuit."',
    (11, 42): "Slave state unmatch error",
    (11, 43): "Communication error occurred for interrupt.",
    (11, 44): "Slave error occurred in over speed.",
    (11, 47): "Positioning error", (11, 48): "ArmI/O power error",
    (11, 58): "SPI circuit error", (11, 59): "Robot system file corrupted.",
    (11, 60): "Task error", (11, 61): "(Joint) Parameter error",
    (11, 89): "Ethercat communication error", (11, 90): "Motion error",
    (11, 91): "(Joint) Motion limit error", (11, 92): "(Joint) Parameter error",
    (11, 93): "ENC error", (11, 94): "Board error",
    (11, 95): "Ethercat Sync0-PDI error", (11, 96): "Ethercat Sync0-PWM error",
    (11, 98): "Unexpected power shutdown.", (11, 99): "Other types of error",
}

_app_errcode = {
    (20, 1): 'Invalid Parameter', (20, 2): 'Failed to open IO',
    (20, 3): 'Failed to close IO', (20, 4): 'IO wait Error',
    (20, 5): 'Digital Output Error', (20, 6): 'Digital Input Error',
    (20, 7): 'Delay Out Error', (20, 99): 'Unknown Error',
    (21, 1): 'Invalid Parameter', (21, 2): 'Invalid Address',
    (21, 3): 'Failed to open SharedMemory', (21, 4): 'Failed to close SharedMemory',
    (21, 5): 'Socket failed to read shared memory',
    (21, 6): 'Socket failed to write shared memory',
    (21, 7): 'Socket failed to write shared memory (system area)',
    (21, 99): 'Unknown Error',
    (22, 1): 'Invalid Parameter',
    (22, 2): 'Failed to transform Position data to list', (22, 99): 'Unknown Error',
    (23, 1): 'Invalid Parameter',
    (23, 2): 'Failed to transform Joint data to list', (23, 99): 'Unknown Error',
    (24, 1): 'Invalid Parameter',
    (24, 2): 'Parent is not a type of _BASE or _ParentContainer',
    (24, 3): 'Failed to transform world coordinate to base coordinate',
    (24, 4): 'Failed to transform base coordinate to world coordinate',
    (24, 5): 'Failed to get inverse matrix',
    (24, 6): 'Invalid Parameter', (24, 7): 'Invalid Parameter',
    (24, 8): 'Invalid Parameter', (24, 9): 'Invalid Parameter',
    (24, 10): 'Invalid Parameter', (24, 99): 'Unknown Error',
    (25, 1): 'Invalid Parameter', (25, 2): 'Failed to clear MotionParam',
    (25, 3): 'Failed to set MotionParam', (25, 99): 'Unknown Error',
    (26, 1): 'Invalid Parameter', (26, 2): 'Robot Controller is in Error state',
    (26, 3): 'Failed to open system control RobotClient',
    (26, 4): 'Failed to open RobotClient', (26, 5): 'Failed to close RobotClient',
    (26, 6): 'Failed to get MotionParam', (26, 7): 'Failed to clear MotionParam',
    (26, 8): 'Failed to move_arc : posture of two points must be given and same as current posture',
    (26, 9): 'Socket failed to connect RobotClient',
    (26, 10): 'Unable to open rblib',
    (26, 11): 'Socket failed to close RobotClient',
    (26, 12): 'ZSP system state is not ready yet',
    (26, 99): 'Unknown Error',
}
zeusrobot
/_zeusrobot/zeus_error.py
zeus_error.py
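How the hierarchy above renders an error, as a short sketch: raising a subclass picks up its type code, and the (type, code) pair is looked up in the message tables:

try:
    raise ZeusIOException(ERR_ZERO_IO, 5, "dout")
except ZeusException as e:
    # prints (after a leading newline): A2005 : ZeusIOException.dout : Digital Output Error
    print(e)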
import json
import socket
import math
import threading
import time
import sys
import os
from . import zeus_common
from .zeus_error import *

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))))
from .zeus_shared_memory.zeus_shared_memory_api import ZeusShm

# #for module check
# import zeus_common
# from zeus_error import *
# from zeus_shared_memory.zeus_shared_memory_api import ZeusShm

__version__ = [0, 0, 0, 0]

# multiturn option; enabled via the ZeusRobot class's use_mt method.
# Used by the Position and ZeusRobot classes.
_use_mt = False


class RobotClient(object):
    _N_RCVBUF = 1024

    _SVSTS_OFF = 1  # OFF
    _SVSTS_ON = 4  # ON

    _EMO_OFF = 0  # False
    _EMO_ON = 1  # True

    _NOP = 1
    _SVSW = 2
    _PLSMOVE = 3
    _MTRMOVE = 4
    _JNTMOVE = 5
    _PTPMOVE = 6
    _CPMOVE = 7
    _SET_TOOL = 8
    _CHANGE_TOOL = 9
    _ASYNCM = 10
    _PASSM = 11
    _OVERLAP = 12
    _MARK = 13
    _JMARK = 14
    _IOCTRL = 15
    _ZONE = 16
    _J2R = 17
    _R2J = 18
    _SYSSTS = 19
    _TRMOVE = 20
    _ABORTM = 21
    _JOINM = 22
    _SLSPEED = 23
    _ACQ_PERMISSION = 24
    _REL_PERMISSION = 25
    _SET_MDO = 26
    _ENABLE_MDO = 27
    _DISABLE_MDO = 28
    _PMARK = 29
    _VERSION = 30
    _ENCRST = 31
    _SAVEPARAMS = 32
    _CALCPLSOFFSET = 33
    _SET_LOG_LEVEL = 34
    _FSCTRL = 35
    _PTPPLAN = 36
    _CPPLAN = 37
    _PTPPLAN_W_SP = 38
    _CPPLAN_W_SP = 39
    _SYSCTRL = 40
    _OPTCPMOVE = 41
    _OPTCPPLAN = 42
    _PTPMOVE_MT = 43
    _MARK_MT = 44
    _J2R_MT = 45
    _R2J_MT = 46
    _PTPPLAN_MT = 47
    _PTPPLAN_W_SP_MT = 48
    _JNTRMOVE = 49
    _JNTRMOVE_WO_CHK = 50
    _CPRMOVE = 51
    _CPRPLAN = 52
    _CPRPLAN_W_SP = 53
    _SUSPENDM = 54
    _RESUMEM = 55
    _GETMT = 56
    _RELBRK = 57
    _CLPBRK = 58
    _ARCMOVE = 59
    _CIRMOVE = 60
    _MMARK = 61
    _SETVENV = 62

    def __init__(self, host, port):
        self._host = host  # '127.0.0.1'
        self._port = port  # 12345
        self._lock = threading.Lock()
        self._sock = None
        self._linear_joint_support = False  # used to select angle

    def __del__(self):
        self.close()

    def open(self, timeout=None):
        try:
            if self._sock is None:
                self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                if timeout:
                    self._sock.settimeout(timeout)
                self._sock.connect((self._host, self._port))
                # Windows-specific keepalive tuning (socket.SIO_KEEPALIVE_VALS)
                self._sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
                self._sock.settimeout(None)
        except socket.error as e:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 9, "RobotClient.open", e)
        ver = self.version()
        if ver[0] == False:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 10, "RobotClient.open")
        else:
            if ver[1] >= 1 or ver[2] >= 8:
                self._linear_joint_support = True
            else:
                self._linear_joint_support = False
        return True

    def close(self):
        try:
            if self._sock is not None:
                self._sock.close()
                self._sock = None
        except socket.error as e:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 11, "RobotClient.close", e)
        return True

    def chkRes(self, cmdid, buf):
        if buf == '':
            print('chkRes buf is empty')
            return [False, 99, 1]
        # print(buf)
        jsonobj = json.loads(buf)
        if cmdid != jsonobj['cmd']:
            self._sock.settimeout(1)
            buf = self._sock.recv(self._N_RCVBUF)
            self._sock.settimeout(None)
            jsonobj = json.loads(buf)
            if cmdid != jsonobj['cmd']:
                print('chkRes cmd ID error')
                return [False, 99, 1]
        return jsonobj['results']

    def nop(self):
        params = {'cmd': RobotClient._NOP, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                return False
            buf = self._sock.recv(self._N_RCVBUF)
            return self.chkRes(params['cmd'], buf)

    def svctrl(self, sw):
        params = {'cmd': RobotClient._SVSW, 'params': [int(sw)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                return False
            buf = self._sock.recv(self._N_RCVBUF)
            return self.chkRes(params['cmd'], buf)

    def
plsmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct): params = {'cmd':RobotClient._PLSMOVE, 'params':[[int(ax1), int(ax2), int(ax3), \ int(ax4), int(ax5), int(ax6)], float(speed), float(acct), float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def mtrmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct): params = {'cmd':RobotClient._MTRMOVE, 'params':[[math.radians(ax1), \ math.radians(ax2), math.radians(ax3), math.radians(ax4), \ math.radians(ax5), math.radians(ax6)], float(speed), float(acct), float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def jntmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct): params = {} if self._linear_joint_support == True: params = {'cmd':RobotClient._JNTMOVE, 'params':[[float(ax1), \ float(ax2), \ float(ax3), \ float(ax4), \ float(ax5), \ float(ax6)], \ float(speed), \ float(acct), \ float(dacct)]} else: params = {'cmd':RobotClient._JNTMOVE, 'params':[[math.radians(ax1), \ math.radians(ax2), \ math.radians(ax3), \ math.radians(ax4), \ math.radians(ax5), \ math.radians(ax6)], \ float(speed), \ float(acct), \ float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def ptpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct): params = {'cmd':RobotClient._PTPMOVE, 'params':[[x / 1000.0, y / 1000.0, z / 1000.0, \ math.radians(rz), math.radians(ry), math.radians(rx), int(posture), \ int(rbcoord)], float(speed), float(acct), float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def cpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct): params = {'cmd':RobotClient._CPMOVE, 'params':[[x / 1000.0, y / 1000.0, z / 1000.0, \ math.radians(rz), math.radians(ry), math.radians(rx), int(posture), \ int(rbcoord)], float(speed) / 1000.0, float(acct), float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def optcpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct): params = {'cmd':RobotClient._OPTCPMOVE, 'params':[[x / 1000.0, y / 1000.0, z / 1000.0, math.radians(rz), math.radians(ry), math.radians(rx), int(posture), int(rbcoord)], float(speed), float(acct), float(dacct)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def settool(self, tid, offx, offy, offz, offrz, offry, offrx): params = {'cmd':RobotClient._SET_TOOL, 'params':[int(tid), offx / 1000.0, offy / 1000.0, offz / 1000.0, math.radians(offrz), math.radians(offry), math.radians(offrx)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def changetool(self, tid): params = {'cmd':RobotClient._CHANGE_TOOL, 'params':[int(tid)]} with 
self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def asyncm(self, sw): params = {'cmd':RobotClient._ASYNCM, 'params':[int(sw)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def passm(self, sw): params = {'cmd':RobotClient._PASSM, 'params':[int(sw)]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def overlap(self, distance): params = {'cmd':RobotClient._OVERLAP, 'params':[float(distance) / 1000.0]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def mark(self): params = {'cmd':RobotClient._MARK, 'params':[]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) retlist = self.chkRes(params['cmd'], buf) if retlist[0] == True: # mm->m retlist[1] = float(retlist[1]) * 1000.0 retlist[2] = float(retlist[2]) * 1000.0 retlist[3] = float(retlist[3]) * 1000.0 # rad->deg retlist[4] = math.degrees(float(retlist[4])) retlist[5] = math.degrees(float(retlist[5])) retlist[6] = math.degrees(float(retlist[6])) return retlist def jmark(self): params = {'cmd':RobotClient._JMARK, 'params':[]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) retlist = self.chkRes(params['cmd'], buf) if retlist[0] == True: if self._linear_joint_support == True: pass else: # rad->deg retlist[1] = math.degrees(retlist[1]) retlist[2] = math.degrees(retlist[2]) retlist[3] = math.degrees(retlist[3]) retlist[4] = math.degrees(retlist[4]) retlist[5] = math.degrees(retlist[5]) retlist[6] = math.degrees(retlist[6]) return retlist def ioctrl(self, wordno, dataL, maskL, dataH, maskH): params = {'cmd':RobotClient._IOCTRL, 'params':[wordno, dataL, maskL, dataH, maskH]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def zone(self, pulse): params = {'cmd':RobotClient._ZONE, 'params':[pulse]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) return self.chkRes(params['cmd'], buf) def j2r(self, ax1, ax2, ax3, ax4, ax5, ax6, rbcoord): params = {} if self._linear_joint_support == True: params = {'cmd':RobotClient._J2R, 'params':[float(ax1), float(ax2), float(ax3), float(ax4), float(ax5), float(ax6), rbcoord]} else: params = {'cmd':RobotClient._J2R, 'params':[math.radians(ax1), math.radians(ax2), math.radians(ax3), math.radians(ax4), math.radians(ax5), math.radians(ax6), rbcoord]} with self._lock: if self._sock.send(json.dumps(params).encode('ascii')) <= 0: print("tx error\n") return False buf = self._sock.recv(self._N_RCVBUF) retlist = self.chkRes(params['cmd'], buf) if retlist[0] == True: # m -> mm retlist[1] = float(retlist[1])*1000.0 retlist[2] = float(retlist[2])*1000.0 retlist[3] = float(retlist[3])*1000.0 # rad->deg retlist[4] = math.degrees(float(retlist[4])) 
    def r2j(self, x, y, z, rz, ry, rx, posture, rbcoord):
        params = {'cmd': RobotClient._R2J,
                  'params': [x / 1000.0, y / 1000.0, z / 1000.0,
                             math.radians(rz), math.radians(ry), math.radians(rx),
                             posture, rbcoord]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        retlist = self.chkRes(params['cmd'], buf)
        if retlist[0] == True:
            if self._linear_joint_support == True:
                pass
            else:
                # rad -> deg
                retlist[1] = math.degrees(retlist[1])
                retlist[2] = math.degrees(retlist[2])
                retlist[3] = math.degrees(retlist[3])
                retlist[4] = math.degrees(retlist[4])
                retlist[5] = math.degrees(retlist[5])
                retlist[6] = math.degrees(retlist[6])
        return retlist

    def syssts(self, typ):
        params = {'cmd': RobotClient._SYSSTS, 'params': [int(typ)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def trmove(self, x, y, z, rz, ry, rx, speed, acct, dacct):
        params = {'cmd': RobotClient._TRMOVE,
                  'params': [[x / 1000.0, y / 1000.0, z / 1000.0,
                              math.radians(rz), math.radians(ry), math.radians(rx)],
                             float(speed) / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def abortm(self):
        params = {'cmd': RobotClient._ABORTM, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def joinm(self):
        params = {'cmd': RobotClient._JOINM, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def slspeed(self, spd):
        params = {'cmd': RobotClient._SLSPEED, 'params': [float(spd)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def acq_permission(self):
        params = {'cmd': RobotClient._ACQ_PERMISSION, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def rel_permission(self):
        params = {'cmd': RobotClient._REL_PERMISSION, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def set_mdo(self, mdoid, portno, value, kind, distance):
        params = {'cmd': RobotClient._SET_MDO,
                  'params': [int(mdoid), int(portno), int(value), int(kind),
                             float(distance) / 1000.0]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def enable_mdo(self, bitfield):
        params = {'cmd': RobotClient._ENABLE_MDO, 'params': [int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)
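    # Note (added; an inference, not stated in the original source):
    # enable_mdo/disable_mdo take a bitfield in the 0-255 range while set_mdo
    # takes an mdoid in 1-8, which suggests that bit i of the bitfield
    # addresses MDO id i+1 (e.g. 0b00000101 would select MDO 1 and MDO 3).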
    def disable_mdo(self, bitfield):
        params = {'cmd': RobotClient._DISABLE_MDO, 'params': [int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def pmark(self, sw):
        params = {'cmd': RobotClient._PMARK, 'params': [int(sw)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def version(self):
        params = {'cmd': RobotClient._VERSION, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def encrst(self, bitfield):
        params = {'cmd': RobotClient._ENCRST, 'params': [int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def saveparams(self):
        params = {'cmd': RobotClient._SAVEPARAMS, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def calcplsoffset(self, typ, bitfield):
        params = {'cmd': RobotClient._CALCPLSOFFSET, 'params': [int(typ), int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def set_log_level(self, level):
        params = {'cmd': RobotClient._SET_LOG_LEVEL, 'params': [int(level)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def fsctrl(self, cmd):
        params = {'cmd': RobotClient._FSCTRL, 'params': [int(cmd)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def ptpplan(self, ex, ey, ez, erz, ery, erx, eposture, erbcoord, speed, acct, dacct):
        params = {'cmd': RobotClient._PTPPLAN,
                  'params': [[ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def cpplan(self, ex, ey, ez, erz, ery, erx, eposture, erbcoord, speed, acct, dacct):
        params = {'cmd': RobotClient._CPPLAN,
                  'params': [[ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed) / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def ptpplan_w_sp(self, sx, sy, sz, srz, sry, srx, sposture, srbcoord,
                     ex, ey, ez, erz, ery, erx, eposture, erbcoord, speed, acct, dacct):
        params = {'cmd': RobotClient._PTPPLAN_W_SP,
                  'params': [[sx / 1000.0, sy / 1000.0, sz / 1000.0,
                              math.radians(srz), math.radians(sry), math.radians(srx),
                              int(sposture), int(srbcoord)],
                             [ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)
    def cpplan_w_sp(self, sx, sy, sz, srz, sry, srx, sposture, srbcoord,
                    ex, ey, ez, erz, ery, erx, eposture, erbcoord, speed, acct, dacct):
        params = {'cmd': RobotClient._CPPLAN_W_SP,
                  'params': [[sx / 1000.0, sy / 1000.0, sz / 1000.0,
                              math.radians(srz), math.radians(sry), math.radians(srx),
                              int(sposture), int(srbcoord)],
                             [ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed) / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def optcpplan(self, ex, ey, ez, erz, ery, erx, eposture, erbcoord, speed, acct, dacct):
        params = {'cmd': RobotClient._OPTCPPLAN,
                  'params': [[ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def sysctrl(self, ctrlid, arg):
        params = {'cmd': RobotClient._SYSCTRL, 'params': [int(ctrlid), int(arg)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def ptpmove_mt(self, x, y, z, rz, ry, rx, posture, rbcoord, multiturn,
                   ik_solver_option, speed, acct, dacct):
        if (multiturn & 0xFF000000) == 0xFF000000:
            ik_solver_option = 0x00000000
        params = {'cmd': RobotClient._PTPMOVE_MT,
                  'params': [[x / 1000.0, y / 1000.0, z / 1000.0,
                              math.radians(rz), math.radians(ry), math.radians(rx),
                              int(posture), int(rbcoord), int(multiturn), ik_solver_option],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def mark_mt(self):
        params = {'cmd': RobotClient._MARK_MT, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        retlist = self.chkRes(params['cmd'], buf)
        if retlist[0] == True:
            # m -> mm
            retlist[1] = float(retlist[1]) * 1000.0
            retlist[2] = float(retlist[2]) * 1000.0
            retlist[3] = float(retlist[3]) * 1000.0
            # rad -> deg
            retlist[4] = math.degrees(float(retlist[4]))
            retlist[5] = math.degrees(float(retlist[5]))
            retlist[6] = math.degrees(float(retlist[6]))
            retlist[9] = int(retlist[9])
        return retlist
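    # Note on the multiturn sentinel used by the *_mt methods below: a
    # multiturn value whose top byte is 0xFF (mask 0xFF000000) marks "no
    # multiturn data", and in that case the caller's ik_solver_option is
    # forced to 0x00000000 before the request is sent.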
    def j2r_mt(self, ax1, ax2, ax3, ax4, ax5, ax6, rbcoord):
        if self._linear_joint_support == True:
            params = {'cmd': RobotClient._J2R_MT,
                      'params': [float(ax1), float(ax2), float(ax3),
                                 float(ax4), float(ax5), float(ax6), rbcoord]}
        else:
            params = {'cmd': RobotClient._J2R_MT,
                      'params': [math.radians(ax1), math.radians(ax2), math.radians(ax3),
                                 math.radians(ax4), math.radians(ax5), math.radians(ax6), rbcoord]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        retlist = self.chkRes(params['cmd'], buf)
        if retlist[0] == True:
            # m -> mm
            retlist[1] = float(retlist[1]) * 1000.0
            retlist[2] = float(retlist[2]) * 1000.0
            retlist[3] = float(retlist[3]) * 1000.0
            # rad -> deg
            retlist[4] = math.degrees(float(retlist[4]))
            retlist[5] = math.degrees(float(retlist[5]))
            retlist[6] = math.degrees(float(retlist[6]))
            retlist[9] = int(retlist[9])
        return retlist

    def r2j_mt(self, x, y, z, rz, ry, rx, posture, rbcoord, multiturn, ik_solver_option):
        if (multiturn & 0xFF000000) == 0xFF000000:
            ik_solver_option = 0x00000000
        params = {'cmd': RobotClient._R2J_MT,
                  'params': [x / 1000.0, y / 1000.0, z / 1000.0,
                             math.radians(rz), math.radians(ry), math.radians(rx),
                             posture, rbcoord, multiturn, ik_solver_option]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        retlist = self.chkRes(params['cmd'], buf)
        if retlist[0] == True:
            if self._linear_joint_support == True:
                pass
            else:
                # rad -> deg
                retlist[1] = math.degrees(retlist[1])
                retlist[2] = math.degrees(retlist[2])
                retlist[3] = math.degrees(retlist[3])
                retlist[4] = math.degrees(retlist[4])
                retlist[5] = math.degrees(retlist[5])
                retlist[6] = math.degrees(retlist[6])
        return retlist

    def ptpplan_mt(self, ex, ey, ez, erz, ery, erx, eposture, erbcoord, emultiturn,
                   ik_solver_option, speed, acct, dacct):
        if (emultiturn & 0xFF000000) == 0xFF000000:
            ik_solver_option = 0x00000000
        params = {'cmd': RobotClient._PTPPLAN_MT,
                  'params': [[ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord), int(emultiturn),
                              int(ik_solver_option)],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def ptpplan_w_sp_mt(self, sx, sy, sz, srz, sry, srx, sposture, srbcoord, smultiturn,
                        sik_solver_option, ex, ey, ez, erz, ery, erx, eposture, erbcoord,
                        emultiturn, eik_solver_option, speed, acct, dacct):
        if (smultiturn & 0xFF000000) == 0xFF000000:
            sik_solver_option = 0x00000000
        if (emultiturn & 0xFF000000) == 0xFF000000:
            eik_solver_option = 0x00000000
        params = {'cmd': RobotClient._PTPPLAN_W_SP_MT,
                  'params': [[sx / 1000.0, sy / 1000.0, sz / 1000.0,
                              math.radians(srz), math.radians(sry), math.radians(srx),
                              int(sposture), int(srbcoord), int(smultiturn),
                              int(sik_solver_option)],
                             [ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord), int(emultiturn),
                              int(eik_solver_option)],
                             float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def jntrmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct):
        if self._linear_joint_support == True:
            params = {'cmd': RobotClient._JNTRMOVE,
                      'params': [[float(ax1), float(ax2), float(ax3),
                                  float(ax4), float(ax5), float(ax6)],
                                 float(speed), float(acct), float(dacct)]}
        else:
            params = {'cmd': RobotClient._JNTRMOVE,
                      'params': [[math.radians(ax1), math.radians(ax2), math.radians(ax3),
                                  math.radians(ax4), math.radians(ax5), math.radians(ax6)],
                                 float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)
    def jntrmove_wo_chk(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct):
        if self._linear_joint_support == True:
            params = {'cmd': RobotClient._JNTRMOVE_WO_CHK,
                      'params': [[float(ax1), float(ax2), float(ax3),
                                  float(ax4), float(ax5), float(ax6)],
                                 float(speed), float(acct), float(dacct)]}
        else:
            params = {'cmd': RobotClient._JNTRMOVE_WO_CHK,
                      'params': [[math.radians(ax1), math.radians(ax2), math.radians(ax3),
                                  math.radians(ax4), math.radians(ax5), math.radians(ax6)],
                                 float(speed), float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def cprmove(self, x, y, z, rz, ry, rx, speed, acct, dacct):
        params = {'cmd': RobotClient._CPRMOVE,
                  'params': [[x / 1000.0, y / 1000.0, z / 1000.0,
                              math.radians(rz), math.radians(ry), math.radians(rx)],
                             speed / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def cprplan(self, dx, dy, dz, drz, dry, drx, speed, acct, dacct):
        params = {'cmd': RobotClient._CPRPLAN,
                  'params': [[dx / 1000.0, dy / 1000.0, dz / 1000.0,
                              math.radians(drz), math.radians(dry), math.radians(drx)],
                             speed / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def cprplan_w_sp(self, x, y, z, rz, ry, rx, posture, rbcoord,
                     dx, dy, dz, drz, dry, drx, speed, acct, dacct):
        params = {'cmd': RobotClient._CPRPLAN_W_SP,
                  'params': [[x / 1000.0, y / 1000.0, z / 1000.0,
                              math.radians(rz), math.radians(ry), math.radians(rx),
                              int(posture), int(rbcoord)],
                             [dx / 1000.0, dy / 1000.0, dz / 1000.0,
                              math.radians(drz), math.radians(dry), math.radians(drx)],
                             speed / 1000.0, float(acct), float(dacct)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def suspendm(self, tmout):
        params = {'cmd': RobotClient._SUSPENDM, 'params': [float(tmout)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def resumem(self):
        params = {'cmd': RobotClient._RESUMEM, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def getmt(self, bx, by, bz, brz, bry, brx, posture, rbcoord, multiturn,
              str_x, str_y, str_z, str_rz, str_ry, str_rx):
        if (multiturn & 0xFF000000) == 0xFF000000:
            # NOTE: ik_solver_option is assigned here but not included in the
            # request that follows.
            ik_solver_option = 0x00000000
        params = {'cmd': RobotClient._GETMT,
                  'params': [[bx / 1000.0, by / 1000.0, bz / 1000.0,
                              math.radians(brz), math.radians(bry), math.radians(brx),
                              int(posture), int(rbcoord), int(multiturn)],
                             [str_x, str_y, str_z, str_rz, str_ry, str_rx]]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)
    def relbrk(self, bitfield):
        params = {'cmd': RobotClient._RELBRK, 'params': [int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def clpbrk(self, bitfield):
        params = {'cmd': RobotClient._CLPBRK, 'params': [int(bitfield)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def arcmove(self, px, py, pz, prz, pry, prx, pposture, prbcoord,
                ex, ey, ez, erz, ery, erx, eposture, erbcoord,
                speed, acct, dacct, orientation):
        params = {'cmd': RobotClient._ARCMOVE,
                  'params': [[px / 1000.0, py / 1000.0, pz / 1000.0,
                              math.radians(prz), math.radians(pry), math.radians(prx),
                              int(pposture), int(prbcoord)],
                             [ex / 1000.0, ey / 1000.0, ez / 1000.0,
                              math.radians(erz), math.radians(ery), math.radians(erx),
                              int(eposture), int(erbcoord)],
                             float(speed) / 1000.0, float(acct), float(dacct),
                             int(orientation)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def cirmove(self, p1x, p1y, p1z, p1rz, p1ry, p1rx, p1posture, p1rbcoord,
                p2x, p2y, p2z, p2rz, p2ry, p2rx, p2posture, p2rbcoord,
                speed, acct, dacct, orientation):
        params = {'cmd': RobotClient._CIRMOVE,
                  'params': [[p1x / 1000.0, p1y / 1000.0, p1z / 1000.0,
                              math.radians(p1rz), math.radians(p1ry), math.radians(p1rx),
                              int(p1posture), int(p1rbcoord)],
                             [p2x / 1000.0, p2y / 1000.0, p2z / 1000.0,
                              math.radians(p2rz), math.radians(p2ry), math.radians(p2rx),
                              int(p2posture), int(p2rbcoord)],
                             float(speed) / 1000.0, float(acct), float(dacct),
                             int(orientation)]}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)

    def mmark(self):
        params = {'cmd': RobotClient._MMARK, 'params': []}
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        retlist = self.chkRes(params['cmd'], buf)
        if retlist[0] == True:
            # rad -> deg for the six joint values; retlist[0] is the status
            # flag, so conversion starts at index 1 (mirroring jmark)
            for i in range(1, 7):
                retlist[i] = math.degrees(float(retlist[i]))
        return retlist

    def setvenv(self, venv_type, ts, te):
        params = {'cmd': RobotClient._SETVENV,
                  'params': [int(venv_type), float(ts), float(te)]}
        print(json.dumps(params))
        with self._lock:
            if self._sock.send(json.dumps(params).encode('ascii')) <= 0:
                print("tx error\n")
                return False
            buf = self._sock.recv(self._N_RCVBUF)
        return self.chkRes(params['cmd'], buf)


class _ZeusRobot(object):
    """
    ZeusRobot class

    Communicates with the MCS to control robot motion.

    Methods:
        __init__(self, host="192.168.0.23", port=12345): constructor
        __del__(self): destructor
        __initialize(): initialize the robot instance
        [Public]
        show_version_mcs(self): get the MCS version
        show_version_api(self): get the API version
        open(self): connect to the robot
        close(self): close the robot connection
        ioctrl(self, wordno, dataL, maskL, dataH, maskH): control the controller IO
        is_open(self): get the robot connection state
        is_permitted(self): get whether robot control permission has been acquired
        is_moving(self): get whether robot motion is in progress
        is_paused(self): get whether robot motion is paused
        is_async(self): get whether asynchronous control is active
        get_servo_status(self): get the servo motor status
        set_servo_off(self): turn the servo motor OFF (brake)
        get_system_status(self, type): get the MCS emergency-stop status information
        async_motion(self, enable): asynchronous control (True: enable / False: disable)
        wait_motion(self): wait for motion completion during asynchronous control
        stop(self): abort robot motion (clears queued motion commands)
        pause(self, timeout): pause robot motion
        resume(self): resume robot motion
        override(self, ovr): override the speed-related MotionParam values [%] (1-100)
        set_motionparam(self, *args, **kwargs): set the robot MotionParam
            [a MotionParam object or argument values]
        get_motionparam(self): get the robot MotionParam
        clear_motionparam(self): reset the robot MotionParam
        set_tooloffset(self, *args, **kwargs): set the tool offset
        change_tool(self, tid): change the tool [0-8]
        set_mdo(self, mdoid, portno, value, mode, distance): configure an MDO
        enable_mdo(self, bitfield): enable MDOs
        disable_mdo(self, bitfield): disable MDOs
        get_position(self): get the current end-effector position as Position data
        get_joint(self): get the current arm angles as Joint data
        Joint2Position(self, *jnt): convert Joint data to Position data
        Position2Joint(self, *pos): convert Position data to Joint data
        adjust_mt(self, pos, str_x, str_y, str_z, str_rz, str_ry, str_rx): adjust the
            multiturn value (closest value when the coordinates are converted to strings)
        set_use_mt(flag): flag controlling whether multiturn is used
        move(self, *cmd): robot move motion (Joint or PTP)
        home(self): move the robot to the Home position
        move_line(self, *cmd): linear robot motion (Linear)
        move_optline(self, *cmd): optimized linear robot motion (Optimized Linear)
        move_rel_tool(self, *args, **kwargs): relative motion in the tool coordinate system
        move_rel_jnt(self, *cmd): relative motion in joint space
        move_rel_line(self, *cmd): linear relative motion in the Cartesian
            coordinate system (Linear)
        move_arc: arc motion
        move_circle: circular motion
        [Internal]
        __acq_permission(self): request robot control permission
        __rel_permission(self): release robot control permission
        __changeMotionParam: change the MotionParam
        _syssts: get MCS system status information
        __plsmove: joint motion to the given pulse values
        __mtrmove: relative joint motion by the given motor values
        _jntmove: wrapper for joint motion inside the move method
        __jntmove: motion to the given joint values
        _ptpmove: wrapper for PTP motion inside the move method
        __ptpmove: PTP motion to the given Cartesian coordinates
        __ptpmove_mt: PTP motion to the given Cartesian coordinates (multiturn applied)
        _cpmove: wrapper for Linear motion
        __cpmove: linear motion to the given Cartesian coordinates (constant speed)
        _optcpmove: wrapper for Optimized Linear motion
        __optcpmove: optimized linear motion to the given Cartesian coordinates
            (moves at the maximum speed of each segment)
    """

    def __init__(self, host="192.168.0.23", port=12345):
        self._isOpen = False       # robot open state
        self._isMoving = False     # robot motion in progress
        self._isPause = False      # robot motion paused
        self._isPermit = False     # robot control permission acquired
        self._isError = False      # robot error state
        self._isAsync = False      # asynchronous control mode
        self._isConnected = False  # Ethernet cable connection state
        # internal attributes
        self.__ovr = 1.0                         # MotionParam override ratio
        self.__mpdef = _MotionParam().mp2list()  # MotionParam defaults
        self.__mp = self.__mpdef                 # MotionParam
        self.__mpovr = self.__mpdef              # MotionParam (override applied)
        self.__first_parameter_update = True     # first motion-parameter update flag
        self._host = host
        self._port = port
        # socket for checking the Ethernet connection state (port 8001)
        self.s_hb = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s_hb.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s_hb.connect((self._host, 8001))
        self.s_hb.settimeout(2)
        self._isConnected = True
        # Ethernet connection watchdog thread
        self.thread_heartbeat_stop = threading.Event()
        self.th_heartbeat = threading.Thread(target=self.thread_heartbeat)
        self.th_heartbeat.setDaemon(True)
        self.th_heartbeat.start()
        # robot clients
        self.__rblib = RobotClient(self._host, self._port)
        # client dedicated to Stop, Pause and Resume control
        self.__rblib_sys = RobotClient(self._host, self._port)
        # shared-memory API and servo status variables
        self._shm = ZeusShm()
        self.shm_update_counter = -1
        self.shm_wait_counter = 0
        self.servo_status = None
        self.emo_status = None
        self.__initialize()
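    # Typical usage sketch (illustrative only; the method names are from this
    # class, but the concrete values and call order are assumptions):
    #     rb = _ZeusRobot("192.168.0.23", 12345)
    #     rb.open()
    #     rb.set_motionparam(jnt_speed=10.0, acctime=0.4, dacctime=0.4)
    #     rb.home()
    #     rb.move(_Position(100, 0, 300, 0, 0, 180))
    #     rb.close()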
    def __initialize(self):
        res = self.open(check_servo=False)
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "__initialize")
        # # ZC status check for TESTING (ZSP is not affected by the ZC system
        # # manager, so remove this if it can be removed)
        # res = self.ioctrl(130, 0, 0xffffffff, 0, 0xffffffff)
        # if res[0] == False:
        #     raise ZeusRobotException(res[1], res[2], "__initialize")
        # st = (res[1] >> 4) & 0x0F
        # if st >= 10:
        #     raise ZeusRobotException(ERR_ZERO_ROBOT, 2, "__initialize")
        self.close()

    def __del__(self):
        try:
            self.__rblib_sys.abortm()
            self.__rblib_sys.close()
            self.__rblib.rel_permission()
            self.__rblib.close()
        except:
            pass

    def thread_heartbeat(self):
        while True:
            if not self.thread_heartbeat_stop.is_set():
                # print("CHECK")
                try:
                    msg = "OK"
                    data = self.s_hb.recv(1024)
                    if data.decode() != msg:
                        raise socket.error
                except:
                    self._isConnected = False
                    self.s_hb.close()
                    self.thread_heartbeat_stop.set()
            else:
                try:
                    self.s_hb = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self.s_hb.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    self.s_hb.connect((self._host, 8001))
                    self._isConnected = True
                    self.thread_heartbeat_stop.clear()
                except:
                    pass
            time.sleep(0.1)

    def show_version_mcs(self):
        res = self.__rblib.version()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "show_version_mcs")
        return res

    def show_version_api(self):
        return __version__

    def open(self, check_servo=True, check_zsp=True):
        if check_zsp:
            if not self._check_zsp_ready():
                raise ZeusRobotException(ERR_ZERO_ROBOT, 12, "open")
        if check_servo:
            self._check_svsts()
        if not self._isOpen:
            ret = self.__rblib.open(3)
            if ret:
                time.sleep(1)
                ret = self.__rblib_sys.open(3)
                if ret:
                    self._isOpen = True
                    self.__acq_permission()
                    res = self.__rblib.changetool(0)
                    if res[0] == False:
                        raise ZeusRobotException(res[1], res[2], "open")
                    res = self.__rblib.asyncm(2)
                    if res[0] == False:
                        raise ZeusRobotException(res[1], res[2], "open")
                    self.__rel_permission()
                    return [True]
                else:
                    raise ZeusRobotException(ERR_ZERO_ROBOT, 3, "open")
            else:
                raise ZeusRobotException(ERR_ZERO_ROBOT, 4, "open")
        else:
            self._isOpen = True
            return [True]

    def close(self):
        if self._isOpen:
            try:
                self.__rblib_sys.abortm()
                self.__rblib_sys.close()
                self.__rblib.rel_permission()
                self.__rblib.close()
            except Exception as e:
                raise ZeusRobotException(ERR_ZERO_ROBOT, 5, "close", e)
            self._isOpen = False
            return True
        else:
            try:
                self.__rblib_sys.abortm()
                self.__rblib_sys.close()
                self.__rblib.rel_permission()  # called as a guard against errors
                self.__rblib.close()
            except:
                pass
            return True

    def __acq_permission(self):
        res = self.__rblib.acq_permission()
        if res[0] == False:
            raise ZeusRobotCriticalException(res[1], res[2], "__acq_permission")
        return res

    def __rel_permission(self):
        res = self.__rblib.rel_permission()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "__rel_permission")
        return res

    def ioctrl(self, wordno, dataL, maskL, dataH, maskH):
        res = self.__rblib.ioctrl(wordno, dataL, maskL, dataH, maskH)
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "ioctrl")
        return res

    def is_open(self):
        return self._isOpen

    def is_permitted(self):
        return self._isPermit

    def is_moving(self):
        return self._isMoving

    def is_paused(self):
        return self._isPause

    def is_async(self):
        return self._isAsync

    def get_servo_status(self):
        res = self.ioctrl(128, 0, 0xffffffff, 0, 0xffffffff)
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "get_servo_status")
        if zeus_common._bitflag(res[1], 1):  # Emergency Stop
            return -1
        if zeus_common._bitflag(res[1], 0):  # Servo ON
            return 1
        return 0  # Servo OFF

    def set_servo_off(self):
        self.__acq_permission()
        res = self.__rblib.svctrl(2)
        self.__rel_permission()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "set_servo_off")
        else:
            return True

    def get_system_status(self, type):
        res = self.__syssts(type)
        if not res[0]:
            raise ZeusRobotException(res[1], res[2], "get_system_status")
        return res
    def async_motion(self, enable):
        res = zeus_common._chkparam(enable, p_type=bool)
        if not res[0]:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "async_motion")
        self.__acq_permission()
        if enable:
            res = self.__rblib.asyncm(1)
        elif not enable:
            res = self.__rblib.asyncm(2)
        self.__rel_permission()
        if not res[0]:
            raise ZeusRobotException(res[1], res[2], "async_motion")
        else:
            if enable:
                self._isAsync = True
            elif not enable:
                self._isAsync = False
            return True

    def wait_motion(self):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        self.__acq_permission()
        res = self.__rblib.joinm()
        self.__rel_permission()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "wait_motion")
        else:
            return True

    def stop(self):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        res = self.__rblib_sys.abortm()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "stop")
        else:
            return True

    def pause(self, timeout):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        res = self.__rblib_sys.suspendm(timeout)
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "pause")
        else:
            return True

    def resume(self):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        res = self.__rblib_sys.resumem()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "resume")
        else:
            return True

    def override(self, ovr):
        res = zeus_common._chkparam(ovr, p_type=[int, float], min=1, max=100)
        if not res[0]:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "override")
        # __mpovr = [lin_speed, jnt_speed, acctime, dacctime, posture, passm,
        #            overlap, zone, pose_speed, ik_solver_option]
        self.__ovr = float(ovr) / 100.
        self.__mpovr = [x * self.__ovr for x in self.__mp[:2]] + \
            self.__mp[2:8] + [self.__mp[8] * self.__ovr] + [self.__mp[9]]
        return True

    def set_motionparam(self, *args, **kwargs):
        if len(args) == 1 and isinstance(args[0], _MotionParam):
            self.__mpdef = args[0].mp2list()
            res = self.__changeMotionParam(args[0])
            if res[0] == False:
                raise ZeusRobotException(res[1], res[2], "set_motionparam")
            else:
                return True
        else:
            mp = _MotionParam(*args, **kwargs)
            self.__mpdef = mp.mp2list()
            res = self.__changeMotionParam(mp)
            if res[0] == False:
                raise ZeusRobotException(res[1], res[2], "set_motionparam")
            else:
                return True

    def get_motionparam(self):
        try:
            mp = _MotionParam(self.__mp)
        except Exception as e:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 6, "get_motionparam", e)
        return mp

    def clear_motionparam(self):
        try:
            mp = _MotionParam(self.__mp)
            mp.clear()
            self.__mp = mp.mp2list()
        except Exception as e:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 7, "clear_motionparam", e)
        return True

    def set_tooloffset(self, *args, **kwargs):
        p = zeus_common._args([0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                              ['id', 'offx', 'offy', 'offz', 'offrz', 'offry', 'offrx'],
                              [int, float, float, float, float, float, float],
                              *args, **kwargs)
        if p[0] == False:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_tooloffset")
        else:
            self.__acq_permission()
            res = self.__rblib.settool(p[1], p[2], p[3], p[4], p[5], p[6], p[7])
            self.__rel_permission()
            if res[0] == False:
                raise ZeusRobotException(res[1], res[2], "set_tooloffset")
            return res

    def change_tool(self, tid):
        res = zeus_common._chkparam(tid, p_type=int, min=0, max=8)
        if res[0] == False:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "change_tool")
        else:
            self.__acq_permission()
            res = self.__rblib.changetool(tid)
            self.__rel_permission()
            if res[0] == False:
                raise ZeusRobotException(res[1], res[2], "change_tool")
            return True
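    # Worked example for override(): with __mp laid out as [lin_speed,
    # jnt_speed, acctime, dacctime, posture, passm, overlap, zone, pose_speed,
    # ik_solver_option], a call override(50) sets __ovr = 0.5 and rebuilds
    # __mpovr so that only lin_speed, jnt_speed and pose_speed are halved;
    # the acceleration times and the remaining entries stay unscaled.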
ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_mdo") res = zeus_common._chkparam(portno, p_type=int, min=0) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_mdo") res = zeus_common._chkparam(value, p_type=int, min=0, max=1) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_mdo") res = zeus_common._chkparam(mode, p_type=int, min=1, max=2) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_mdo") res = zeus_common._chkparam(distance, p_type=[int, float], min=0.0) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "set_mdo") self.__acq_permission() res = self.__rblib.set_mdo(mdoid, portno, value, mode, distance) self.__rel_permission() if res[0] == False: raise ZeusRobotException(res[1], res[2], "set_mdo") return True def enable_mdo(self, bitfield): res = zeus_common._chkparam(bitfield, p_type=int, min=0, max=255) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "enable_mdo") self.__acq_permission() res = self.__rblib.enable_mdo(bitfield) self.__rel_permission() if res[0] == False: raise ZeusRobotException(res[1], res[2], "enable_mdo") return True def disable_mdo(self, bitfield): res = zeus_common._chkparam(bitfield, p_type=int, min=0, max=255) if res[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "disable_mdo") self.__acq_permission() res = self.__rblib.disable_mdo(bitfield) self.__rel_permission() if res[0] == False: raise ZeusRobotException(res[1], res[2], "disable_mdo") return True def get_position(self): res = self.__rblib.mark_mt() if res[0] == False: raise ZeusRobotException(res[1], res[2], "get_position") else: return _Position(res[1:7], _BASE, res[7], res[9]) def get_joint(self): res = self.__rblib.jmark() if res[0] == False: raise ZeusRobotException(res[1], res[2], "get_joint") else: return _Joint(res[1:7]) def Joint2Position(self, *jnt): v = [] for m in jnt: if isinstance(m, list): for c in m: if isinstance(c, _Joint): a = c.jnt2list() b = self.__rblib.j2r_mt(a[0], a[1], a[2], a[3], a[4], a[5], 1) if b[0] == True: v.append(_Position(b[1:7], _BASE, b[7], b[9])) else: raise ZeusRobotException(b[1], b[2], "Joint2Position") else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "Joint2Position") else: if isinstance(m, _Joint): a = m.jnt2list() b = self.__rblib.j2r_mt(a[0], a[1], a[2], a[3], a[4], a[5], 1) if b[0] == True: v.append(_Position(b[1:7], _BASE, b[7], b[9])) else: raise ZeusRobotException(b[1], b[2], "Joint2Position") else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "Joint2Position") if len(v) > 1: return v else: return v[0] def Position2Joint(self, *pos): _ik_solver_option = self.__mp[9] v = [] for m in pos: if isinstance(m, list): for c in m: if isinstance(c, _Position): a = c._position(True) b = self.__rblib.r2j_mt(a[0], a[1], a[2], a[3], a[4], a[5], a[7], 1, a[8], _ik_solver_option) if b[0] == True: v.append(_Joint(b[1:7])) else: raise ZeusRobotException(b[1], b[2], "Position2Joint") else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "Position2Joint") else: if isinstance(m, _Position): a = m._position(True) b = self.__rblib.r2j_mt(a[0], a[1], a[2], a[3], a[4], a[5], a[7], 1, a[8], _ik_solver_option) if b[0] == True: v.append(_Joint(b[1:7])) else: raise ZeusRobotException(b[1], b[2], "Position2Joint") else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "Position2Joint") if len(v) > 1: return v else: return v[0] def adjust_mt(self, pos, str_x, str_y, str_z, str_rz, str_ry, str_rx): b = pos._position(True) res = self.__rblib.getmt(b[0], b[1], b[2], b[3], b[4], b[5], b[7], 1, b[8], str_x, 
    def adjust_mt(self, pos, str_x, str_y, str_z, str_rz, str_ry, str_rx):
        b = pos._position(True)
        res = self.__rblib.getmt(b[0], b[1], b[2], b[3], b[4], b[5], b[7], 1, b[8],
                                 str_x, str_y, str_z, str_rz, str_ry, str_rx)
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "adjust_mt")
        else:
            return int(res[1])

    def set_use_mt(self, flag):
        global _use_mt
        _use_mt = bool(flag)

    def _check_shm_update_counter(self):
        """
        Check the update counter to verify that the shared memory is being
        updated properly whenever _check_svsts is called.

        Raises:
            ZeusRobotException: raised when the shared-memory update fails
        """
        while True:
            tmp_shm_update_counter = int(self._shm.read(0x0008, 1)[0])
            if self.shm_update_counter < tmp_shm_update_counter:
                self.shm_update_counter = tmp_shm_update_counter
                self.shm_wait_counter = 0
                break
            else:
                self.shm_wait_counter += 1
                time.sleep(0.01)
                if self.shm_wait_counter > 20:
                    self.shm_wait_counter = 0
                    raise ZeusRobotException(10, 24, "_check_shm_update_counter")

    def _check_svsts(self):
        self._check_shm_update_counter()
        self.servo_status = self._shm.read(0x3260, 1)  # ON : 4
        self.emo_status = self._shm.read(0x3266, 1)    # OFF : 0
        if self.emo_status[0] == RobotClient._EMO_ON:
            if self.servo_status[0] == RobotClient._SVSTS_ON:
                # a case that cannot actually occur
                raise ZeusRobotException(10, 22, "_check_svsts")
            elif self.servo_status[0] == RobotClient._SVSTS_OFF:
                raise ZeusRobotException(10, 22, "_check_svsts")
        else:  # RobotClient._EMO_OFF
            if self.servo_status[0] == RobotClient._SVSTS_ON:
                pass
            elif self.servo_status[0] == RobotClient._SVSTS_OFF:
                raise ZeusRobotException(10, 23, "_check_svsts")

    def _check_zsp_ready(self):
        """
        Check whether the ZSP state is Servo_Off_Ready/Ready/Run.
        """
        self._check_shm_update_counter()
        self.system_status = self._shm.read(0x3274, 1)
        if self.system_status[0] == 2 or self.system_status[0] == 3 or self.system_status[0] == 4:
            return True
        else:
            return False

    def move(self, *cmd):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        self.__mp = self.__mpdef[:]
        motion_id = []
        for m in cmd:
            if isinstance(m, list):
                for c in m:
                    if isinstance(c, _Position):
                        res = self._ptpmove(c)
                        if res[0] == False:
                            raise ZeusRobotException(res[1], res[2], "move")
                        else:
                            motion_id.append(res[1])
                    elif isinstance(c, _Joint):
                        res = self._jntmove(c)
                        if res[0] == False:
                            raise ZeusRobotException(res[1], res[2], "move")
                        else:
                            motion_id.append(res[1])
                    elif isinstance(c, _MotionParam):
                        res = self.__changeMotionParam(c)
                        if res[0] == False:
                            raise ZeusRobotException(res[1], res[2], "move")
                        # __changeMotionParam returns [True] only; there is no
                        # motion id to record
                    else:
                        raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move")
            else:
                if isinstance(m, _Position):
                    res = self._ptpmove(m)
                    if res[0] == False:
                        raise ZeusRobotException(res[1], res[2], "move")
                    else:
                        motion_id.append(res[1])
                elif isinstance(m, _Joint):
                    res = self._jntmove(m)
                    if res[0] == False:
                        raise ZeusRobotException(res[1], res[2], "move")
                    else:
                        motion_id.append(res[1])
                elif isinstance(m, _MotionParam):
                    res = self.__changeMotionParam(m)
                    if res[0] == False:
                        raise ZeusRobotException(res[1], res[2], "move")
                else:
                    raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move")
        return [True] + motion_id

    def home(self):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        res = self._jntmove(_Joint(0, 0, 0, 0, 0, 0))
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "home")
        return res
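    # move() usage sketch (illustrative; the targets are assumptions): motion
    # targets and parameter changes can be interleaved, and lists are
    # flattened one level:
    #     rb.move(_MotionParam(jnt_speed=20.0),
    #             _Joint(0, 0, 0, 0, 0, 0),
    #             [_Position(100, 0, 300, 0, 0, 180)])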
ZeusRobotException(res[1], res[2], "move_line") else: motion_id.append(res[1]) else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_line") else: if isinstance(m, _Position) or isinstance(m, _Joint): res = self._cpmove(m) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_line") else: motion_id.append(res[1]) elif isinstance(m, _MotionParam): res = self.__chMotion(m) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_line") else: motion_id.append(res[1]) else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_line") return [True] + motion_id def move_optline(self, *cmd): try: self._check_svsts() except ZeusRobotException as e: raise e self.__mp = self.__mpdef[:] motion_id = [] for m in cmd: if isinstance(m, list): for c in m: if isinstance(c, _Position) or isinstance(c, _Joint): res = self._optcpmove(c) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_optline") else: motion_id.append(res[1]) elif isinstance(c, _MotionParam): res = self.__changeMotionParam(c) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_optline") else: motion_id.append(res[1]) else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_optline") else: if isinstance(m, _Position) or isinstance(m, _Joint): res = self._optcpmove(m) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_optline") else: motion_id.append(res[1]) elif isinstance(m, _MotionParam): res = self.__chMotion(m) if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_optline") else: motion_id.append(res[1]) else: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_optline") return [True] + motion_id def move_rel_tool(self, *args, **kwargs): try: self._check_svsts() except ZeusRobotException as e: raise e p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], ['dx', 'dy', 'dz', 'drz', 'dry', 'drx'], [float, float, float, float, float, float], *args, **kwargs) if p[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_rel_tool") else: _speed = self.__mpovr[0] _acct = self.__mpovr[2] _dacct = self.__mpovr[3] self.__acq_permission() res = self.__rblib.trmove(p[1], p[2], p[3], p[4], p[5], p[6], _speed, _acct, _dacct) self.__rel_permission() if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_rel_tool") else: return res def move_rel_jnt(self, *args, **kwargs): try: self._check_svsts() except ZeusRobotException as e: raise e p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], ['dj1', 'dj2', 'dj3', 'dj4', 'dj5', 'dj6'], [float, float, float, float, float, float], *args, **kwargs) if p[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_rel_jnt") else: _speed = self.__mpovr[1] _acct = self.__mpovr[2] _dacct = self.__mpovr[3] self.__acq_permission() res = self.__rblib.jntrmove(p[1], p[2], p[3], p[4], p[5], p[6], _speed, _acct, _dacct) self.__rel_permission() if res[0] == False: raise ZeusRobotException(res[1], res[2], "move_rel_jnt") else: return res def move_rel_line(self, *args, **kwargs): try: self._check_svsts() except ZeusRobotException as e: raise e p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], ['dx', 'dy', 'dz', 'drz', 'dry', 'drx'], [float, float, float, float, float, float], *args, **kwargs) if p[0] == False: raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_rel_line") else: _speed = self.__mpovr[0] _acct = self.__mpovr[2] _dacct = self.__mpovr[3] self.__acq_permission() res = self.__rblib.cprmove(p[1], p[2], p[3], p[4], p[5], p[6], _speed, _acct, _dacct) self.__rel_permission() if res[0] == False: raise 
    def move_arc(self, pos1, pos2, orientation):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        if not isinstance(pos1, _Position):
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_arc")
        if not isinstance(pos2, _Position):
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_arc")
        res = zeus_common._chkparam(orientation, p_type=int, min=0, max=2)
        if not res[0]:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_arc")
        _speed = self.__mpovr[0]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        _cx, _cy, _cz, _crz, _cry, _crx, _, _cps_1 = self.get_position().pos2list()[0:8]
        _x1, _y1, _z1, _rz1, _ry1, _rx1, _, _ps1 = pos1._position()[0:8]
        _x2, _y2, _z2, _rz2, _ry2, _rx2, _, _ps2 = pos2._position()[0:8]
        if _cps_1 != _ps1 or _cps_1 != _ps2:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 8, "move_arc")
        self.__acq_permission()
        res = self.__rblib.arcmove(
            _x1, _y1, _z1, _rz1, _ry1, _rx1, _ps1, 1,
            _x2, _y2, _z2, _rz2, _ry2, _rx2, _ps2, 1,
            _speed, _acct, _dacct, orientation)
        self.__rel_permission()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "move_arc")
        else:
            return res

    def move_circle(self, pos1, pos2, orientation):
        try:
            self._check_svsts()
        except ZeusRobotException as e:
            raise e
        if not isinstance(pos1, _Position):
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_circle")
        if not isinstance(pos2, _Position):
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_circle")
        res = zeus_common._chkparam(orientation, p_type=int, min=0, max=1)
        if not res[0]:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "move_circle")
        _speed = self.__mpovr[0]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        _x1, _y1, _z1, _rz1, _ry1, _rx1, _, _ps1 = pos1._position()[0:8]
        _x2, _y2, _z2, _rz2, _ry2, _rx2, _, _ps2 = pos2._position()[0:8]
        self.__acq_permission()
        res = self.__rblib.cirmove(
            _x1, _y1, _z1, _rz1, _ry1, _rx1, _ps1, 1,
            _x2, _y2, _z2, _rz2, _ry2, _rx2, _ps2, 1,
            _speed, _acct, _dacct, orientation)
        self.__rel_permission()
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "move_circle")
        else:
            return res
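    # move_arc()/move_circle() sketch (illustrative; the points are
    # assumptions): both take two _Position objects plus an orientation flag,
    # and move_arc additionally requires the posture of both points to match
    # the current posture. Presumably the arc runs from the current position
    # through pos1 to pos2 (the exact path semantics live in the MCS):
    #     rb.move_arc(_Position(200, 100, 300, 0, 0, 180),
    #                 _Position(300, 0, 300, 0, 0, 180), orientation=0)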
    # Internal Methods
    def __changeMotionParam(self, param):
        if not isinstance(param, _MotionParam):
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "__changeMotionParam")
        else:
            # Check parameter update
            new_mp = param.mp2list()
            update_overlap = False
            update_passm = False
            update_zone = False
            update_slspeed = False
            if new_mp[6] != self.__mp[6]:  # Check overlap update
                update_overlap = True
            if new_mp[5] != self.__mp[5]:  # Check passm update
                update_passm = True
            if new_mp[7] != self.__mp[7]:  # Check zone update
                update_zone = True
            if new_mp[8] != self.__mp[8]:  # Check slspeed update
                update_slspeed = True
            if self.__first_parameter_update:
                self.__first_parameter_update = False
                update_overlap = True
                update_passm = True
                update_zone = True
                update_slspeed = True
            self.__mp = new_mp
            if self.__mp[8] < 1.0:
                self.__mp[8] = 1.0
            # __mpovr = [lin_speed, jnt_speed, acctime, dacctime, posture,
            #            passm, overlap, zone, pose_speed, ik_solver_option]
            self.__mpovr = [x * self.__ovr for x in self.__mp[:2]] + \
                self.__mp[2:8] + [self.__mp[8] * self.__ovr] + [self.__mp[9]]
            self.__acq_permission()
            if update_overlap:
                res = self.__rblib.overlap(self.__mp[6])
                if res[0] == False:
                    raise ZeusRobotException(res[1], res[2], "__changeMotionParam")
            if update_passm:
                res = self.__rblib.passm(self.__mp[5])
                if res[0] == False:
                    raise ZeusRobotException(res[1], res[2], "__changeMotionParam")
            if update_zone:
                res = self.__rblib.zone(self.__mp[7])
                if res[0] == False:
                    raise ZeusRobotException(res[1], res[2], "__changeMotionParam")
            if update_slspeed:
                res = self.__rblib.slspeed(self.__mp[8])
                if res[0] == False:
                    raise ZeusRobotException(res[1], res[2], "__changeMotionParam")
            self.__rel_permission()
            return [True]

    def __syssts(self, type):
        return self.__rblib.syssts(type)

    def __plsmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct):
        """ Private """
        return self.__rblib.plsmove(ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct)

    def __mtrmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct):
        """ Private """
        return self.__rblib.mtrmove(ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct)

    def _jntmove(self, param):
        _ax1, _ax2, _ax3, _ax4, _ax5, _ax6 = param.jnt2list()[0:6]
        _speed = self.__mpovr[1]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        self._isMoving = True
        self.__acq_permission()
        res = self.__jntmove(_ax1, _ax2, _ax3, _ax4, _ax5, _ax6, _speed, _acct, _dacct)
        self.__rel_permission()
        self._isMoving = False
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "_jntmove")
        else:
            return res

    def __jntmove(self, ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct):
        return self.__rblib.jntmove(ax1, ax2, ax3, ax4, ax5, ax6, speed, acct, dacct)

    def _ptpmove(self, param):
        _x, _y, _z, _rz, _ry, _rx = param._position()[0:6]
        if param.pos[7] == -1:
            _posture = self.__mpovr[4]
        else:
            _posture = param.pos[7]
        _multiturn = param.pos[8]
        _rbcoord = 1
        _speed = self.__mpovr[1]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        _ik_solver_option = self.__mpovr[9]
        # TBD
        # if self.accel_limit:
        #     _speed = self.convert_accel_ptp(_x, _y, _z, _rz, _ry, _rx, _posture,
        #                                     _rbcoord, _multiturn, _ik_solver_option,
        #                                     _speed, _acct, _dacct)
        self._isMoving = True
        self.__acq_permission()
        if _use_mt:
            res = self.__ptpmove_mt(_x, _y, _z, _rz, _ry, _rx, _posture, _rbcoord,
                                    _multiturn, _ik_solver_option, _speed, _acct, _dacct)
        else:
            res = self.__ptpmove(_x, _y, _z, _rz, _ry, _rx, _posture, _rbcoord,
                                 _speed, _acct, _dacct)
        self.__rel_permission()
        self._isMoving = False
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "_ptpmove")
        else:
            return res

    def __ptpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct):
        return self.__rblib.ptpmove(x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct)

    def __ptpmove_mt(self, x, y, z, rz, ry, rx, posture, rbcoord, multiturn,
                     ik_solver_option, speed, acct, dacct):
        return self.__rblib.ptpmove_mt(x, y, z, rz, ry, rx, posture, rbcoord,
                                       multiturn, ik_solver_option, speed, acct, dacct)

    def _cpmove(self, param):
        if isinstance(param, _Position):
            _x, _y, _z, _rz, _ry, _rx = param._position()[0:6]
            if param.pos[7] == -1:
                _posture = self.__mpovr[4]
            else:
                _posture = param.pos[7]
        elif isinstance(param, _Joint):
            p = self.Joint2Position(param).pos
            _x, _y, _z, _rz, _ry, _rx = p[0:6]
            _posture = p[7]
        else:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "_cpmove")
        _rbcoord = 1
        _speed = self.__mpovr[0]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        # TBD
        # if self.accel_limit:
        #     _speed = self.convert_accel_cp(_x, _y, _z, _rz, _ry, _rx, _posture,
        #                                    _rbcoord, _speed, _acct, _dacct)
        self._isMoving = True
        self.__acq_permission()
        res = self.__cpmove(_x, _y, _z, _rz, _ry, _rx, _posture, _rbcoord,
                            _speed, _acct, _dacct)
        self.__rel_permission()
        self._isMoving = False
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "_cpmove")
        return res

    def __cpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct):
        return self.__rblib.cpmove(x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct)
    def _optcpmove(self, param):
        if isinstance(param, _Position):
            _x, _y, _z, _rz, _ry, _rx = param._position()[0:6]
            if param.pos[7] == -1:
                _posture = self.__mpovr[4]
            else:
                _posture = param.pos[7]
        elif isinstance(param, _Joint):
            p = self.Joint2Position(param).pos
            _x, _y, _z, _rz, _ry, _rx = p[0:6]
            _posture = p[7]
        else:
            raise ZeusRobotException(ERR_ZERO_ROBOT, 1, "_optcpmove")
        _rbcoord = 1
        _speed = self.__mpovr[1]
        _acct = self.__mpovr[2]
        _dacct = self.__mpovr[3]
        # TBD
        # if self.accel_limit:
        #     _speed = self.convert_accel_optcp(_x, _y, _z, _rz, _ry, _rx, _posture,
        #                                       _rbcoord, _speed, _acct, _dacct)
        self._isMoving = True
        self.__acq_permission()
        res = self.__optcpmove(_x, _y, _z, _rz, _ry, _rx, _posture, _rbcoord,
                               _speed, _acct, _dacct)
        self.__rel_permission()
        self._isMoving = False
        if res[0] == False:
            raise ZeusRobotException(res[1], res[2], "_optcpmove")
        return res

    def __optcpmove(self, x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct):
        """Private (not public)"""
        return self.__rblib.optcpmove(x, y, z, rz, ry, rx, posture, rbcoord, speed, acct, dacct)


class _Base(object):
    """
    Base class

    Defines the world coordinate system. A dummy class used by the Position
    and Coordinate classes for the world coordinate system.

    Methods:
        get_I_matrix(self): return the identity matrix
        eulerangle(self): return the Euler angles (a [0.0, 0.0, 0.0] vector)

    Example:
        _BASE = _Base()
    """

    def __init__(self):
        self.__I = zeus_common._matI(4)

    def get_I_matrix(self):
        '''
        Return the identity matrix.
        :return: list
        '''
        return self.__I

    def eulerangle(self):
        '''
        Return a [0.0, 0.0, 0.0] vector.
        :return: list
        '''
        return [0.0, 0.0, 0.0]


_BASE = _Base()


class _ParentContainer(object):
    """
    ParentContainer class

    Parent class of the Position and Coordinate classes.

    Attributes:
        pos(list): coordinate-system parameters [x, y, z, rz, ry, rx, parent].
            Applied as the initial value of the self.pos attribute of the
            inheriting classes.

    Methods:
        matrix(self): return the pos values in matrix form
    """
    # applied as the initial value of self.pos in the inheriting classes
    pos = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, _BASE]

    def __init__(self):
        pass

    def matrix(self):
        m = zeus_common._matEuler(self.pos[3], self.pos[4], self.pos[5])
        m = zeus_common._mdotm(zeus_common._matShift(self.pos[0], self.pos[1], self.pos[2]), m)
        m = zeus_common._mdotm(self.pos[6].get_I_matrix(), m)
        return m


class _Position(_ParentContainer):
    """
    Position class

    Defines a teaching point in the Cartesian coordinate system.

    Attributes:
        pos: teaching-point parameters [x, y, z, rz, ry, rx, parent, posture, multiturn]

    Methods:
        __param_position(self, *args, **kwargs): parse parameters
        _position(self): return the teaching point as a list
        has_mt(self): return whether the point carries a multiturn value
        pos2list(self): return the teaching point as a list
        pos2dict(self): return the teaching point as a dict
        copy(self): return a copy of the Position object
        clear(self): reset the teaching point
        replace(self, *args, **kwargs): replace the teaching point and return it
        offset(self, *args, **kwargs): shift the teaching point (returns a new object)
        shift(self, *args, **kwargs): shift the teaching point (updates the existing object)
    """
    __defpos = [0., 0., 0., 0., 0., 0., _BASE, -1, 0xFF000000]  # initial value of the pos attribute

    def __init__(self, *args, **kwargs):
        """
        Create a Position object.

        Args:
            x(float): position (world-coordinate X)
            y(float): position (world-coordinate Y)
            z(float): position (world-coordinate Z)
            rz(float): orientation (Rz of the Z-Y-X Euler angles)
            ry(float): orientation (Ry of the Z-Y-X Euler angles)
            rx(float): orientation (Rx of the Z-Y-X Euler angles)
            parent(_Coordinate or _Base): transform matrix used to convert the
                position/orientation data into world coordinates
            posture(int): robot arm posture (0-7)
            multiturn(int): multiturn value
        """
        self.pos = _Position.__defpos
        self.__param_position(*args, **kwargs)
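    # Construction sketch (illustrative; the values are assumptions):
    # arguments may be positional or keyword-based, e.g.
    #     _Position(100, 0, 300, 0, 0, 180)
    #     _Position(x=100, z=300, posture=6)
    # Omitted fields fall back to the shared class default __defpos, which is
    # updated on every construction.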
    def __param_position(self, *args, **kwargs):
        """
        Internal method that sets the Position parameter values.

        Args:
            *args: parameter values
            **kwargs: parameter values with keys

        Returns:
            bool: whether the call succeeded
        """
        p = zeus_common._args(self.pos,
                              ['x', 'y', 'z', 'rz', 'ry', 'rx', 'parent', 'posture', 'multiturn'],
                              [float, float, float, float, float, float, None, int, int],
                              *args, **kwargs)
        if p[0] == False:
            raise PositionException(ERR_ZERO_POSITION, 1, "__param_position", p[1])
        else:
            self.pos = p[1:]
            if isinstance(self.pos[6], int):
                # posture was passed in the parent slot: shuffle the values
                self.pos[8] = self.pos[7]
                self.pos[7] = self.pos[6]
                self.pos[6] = _Position.__defpos[6]
            _Position.__defpos = self.pos
            return True

    def _position(self, force_use_mt=False):
        """
        Return the teaching point converted into the parent coordinate system
        as a list.

        Args:
            force_use_mt: option to force the use of multiturn

        Returns:
            list: teaching point
        """
        if isinstance(self.pos[6], _Base):
            if _use_mt or force_use_mt:
                return self.pos
            else:
                return self.pos[:8]
        try:
            mp = zeus_common._mdotm(zeus_common._matShift(self.pos[0], self.pos[1], self.pos[2]),
                                    zeus_common._matEuler(self.pos[3], self.pos[4], self.pos[5]))
            mw = self.pos[6].matrix()
            m = zeus_common._mdotm(mw, mp)
            if _use_mt or force_use_mt:
                return [m[0][3], m[1][3], m[2][3]] + zeus_common._eulMatrix(m) + \
                    [self.pos[6], self.pos[7], self.pos[8]]
            else:  # _not_mt
                return [m[0][3], m[1][3], m[2][3]] + zeus_common._eulMatrix(m) + \
                    [self.pos[6], self.pos[7]]
        except Exception as e:
            raise PositionException(ERR_ZERO_POSITION, 2, "_position", e)

    def has_mt(self):
        """
        Return whether the point carries a multiturn value.

        Returns:
            bool: whether the multiturn parameter is present
        """
        if (self.pos[8] & 0xFF000000) == 0xFF000000:
            return False
        else:
            return True

    def pos2list(self):
        """
        Return the teaching point as a list.

        Returns:
            list: teaching point
        """
        if _use_mt:
            return self.pos[:]
        else:
            return self.pos[:8]

    def pos2dict(self):
        """
        Return the teaching point as a dict.

        Returns:
            dict: teaching point
        """
        if _use_mt:
            k = ['x', 'y', 'z', 'rz', 'ry', 'rx', 'parent', 'posture', 'multiturn']
        else:  # _not_mt
            k = ['x', 'y', 'z', 'rz', 'ry', 'rx', 'parent', 'posture']
        return dict(zip(k, self.pos))

    def copy(self):
        """
        Return a Position object carrying the same information.

        Returns:
            _Position: a clone of the Position object this method was called on
        """
        return _Position(self.pos[:])

    def clear(self):
        """
        Reset the teaching-point data (self.pos, _Position.__defpos).

        Returns:
            None
        """
        self.pos = [0., 0., 0., 0., 0., 0., _BASE, -1, 0xFF000000]
        _Position.__defpos = [0., 0., 0., 0., 0., 0., _BASE, -1, 0xFF000000]

    def replace(self, *args, **kwargs):
        """
        Change Position parameter components.

        Args:
            *args: replacement values
            **kwargs: replacement values with keys

        Returns:
            _Position: the Position object with the changed parameter values
        """
        self.__param_position(*args, **kwargs)
        return self

    def offset(self, *args, **kwargs):
        """
        Return a new object with shifted coordinate values (the object this is
        called on is not modified).

        Args:
            *args: shift values
            **kwargs: shift values with keys

        Returns:
            _Position: a new, shifted Position object
        """
        p = self.copy()
        return p.shift(*args, **kwargs)

    def shift(self, *args, **kwargs):
        """
        Shift the coordinate values of the object (updates and returns the
        object this is called on).

        Args:
            *args: shift values
            **kwargs: shift values with keys

        Returns:
            _Position: the shifted Position object this was called on
        """
        p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                              ['dx', 'dy', 'dz', 'drz', 'dry', 'drx'],
                              [float, float, float, float, float, float],
                              *args, **kwargs)
        if p[0] == False:
            raise PositionException(ERR_ZERO_POSITION, 1, "shift", p[1])
        else:
            self.pos[0:6] = [self.pos[i] + p[i + 1] for i in range(6)]
            return self
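# offset() vs shift() (illustrative; the values are assumptions): offset
# returns a shifted copy and leaves the original untouched, while shift
# mutates and returns the same object:
#     p1 = _Position(100, 0, 300, 0, 0, 180)
#     p2 = p1.offset(dz=50)   # new object, p1 unchanged
#     p1.shift(dz=50)         # p1 itself is updated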
class _Joint(object):
    """
    Joint class

    Defines a teaching point for each robot axis.

    Attributes:
        jnt: teaching-point parameters [j1, j2, j3, j4, j5, j6]

    Methods:
        __param_joint(self, *args, **kwargs): parse parameters
        replace(self, *args, **kwargs): replace the teaching point and return it
        jnt2list(self): return the teaching point as a list
        jnt2dict(self): return the teaching point as a dict
        copy(self): return a copy of the Joint object
        clear(self): reset the teaching point
        offset(self, *args, **kwargs): shift the teaching point (returns a new object)
        shift(self, *args, **kwargs): shift the teaching point (updates the existing object)
    """
    __defjnt = [0., 0., 0., 0., 0., 0.]  # initial value of the jnt attribute

    def __init__(self, *args, **kwargs):
        """
        Create a Joint object.

        Args:
            j1(float): J1 angle [deg]
            j2(float): J2 angle [deg]
            j3(float): J3 angle [deg]
            j4(float): J4 angle [deg]
            j5(float): J5 angle [deg]
            j6(float): J6 angle [deg]
        """
        self.jnt = _Joint.__defjnt
        self.__param_joint(*args, **kwargs)

    def __param_joint(self, *args, **kwargs):
        """
        Internal method that sets the Joint parameter values.

        Args:
            *args: parameter values
            **kwargs: parameter values with keys

        Returns:
            bool: whether the call succeeded
        """
        p = zeus_common._args(self.jnt, ['j1', 'j2', 'j3', 'j4', 'j5', 'j6'],
                              [float, float, float, float, float, float],
                              *args, **kwargs)
        if p[0] == False:
            raise JointException(ERR_ZERO_JOINT, 1, "__param_joint", p[1])
        else:
            self.jnt = p[1:]
            _Joint.__defjnt = self.jnt
            return True

    def jnt2list(self):
        """
        Return the teaching point as a list.

        Returns:
            list: teaching point
        """
        return self.jnt[:]

    def jnt2dict(self):
        """
        Return the teaching point as a dict.

        Returns:
            dict: teaching point
        """
        k = ['j1', 'j2', 'j3', 'j4', 'j5', 'j6']
        return dict(zip(k, self.jnt[:]))

    def copy(self):
        """
        Return a Joint object carrying the same information.

        Returns:
            _Joint: a clone of the Joint object this method was called on
        """
        return _Joint(self.jnt[:])

    def clear(self):
        """
        Reset the teaching-point data (self.jnt, _Joint.__defjnt).

        Returns:
            None
        """
        self.jnt = [0., 0., 0., 0., 0., 0.]
        _Joint.__defjnt = [0., 0., 0., 0., 0., 0.]

    def replace(self, *args, **kwargs):
        """
        Change Joint parameter components.

        Args:
            *args: replacement values
            **kwargs: replacement values with keys

        Returns:
            _Joint: the Joint object with the changed parameter values
        """
        self.__param_joint(*args, **kwargs)
        return self

    def offset(self, *args, **kwargs):
        """
        Return a new object with shifted joint values (the object this is
        called on is not modified).

        Args:
            *args: shift values
            **kwargs: shift values with keys

        Returns:
            _Joint: a new, shifted Joint object
        """
        p = self.copy()
        return p.shift(*args, **kwargs)

    def shift(self, *args, **kwargs):
        """
        Shift the joint values of the object (updates and returns the object
        this is called on).

        Args:
            *args: shift values
            **kwargs: shift values with keys

        Returns:
            _Joint: the shifted Joint object this was called on
        """
        p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                              ['dj1', 'dj2', 'dj3', 'dj4', 'dj5', 'dj6'],
                              [float, float, float, float, float, float],
                              *args, **kwargs)
        if p[0] == False:
            raise JointException(ERR_ZERO_JOINT, 1, "shift", p[1])
        else:
            self.jnt[0:6] = [self.jnt[i] + p[i + 1] for i in range(6)]
            return self
            _Coordinate.__defpos = self.pos
            return True

    def coord2list(self):
        return self.pos[:]

    def coord2dict(self):
        k = ['x', 'y', 'z', 'rz', 'ry', 'rx', 'parent']
        return dict(zip(k, self.pos))

    def world2base(self, wx, wy, wz, wrz, wry, wrx):
        try:
            mp = zeus_common._mdotm(zeus_common._matShift(wx, wy, wz),
                                    zeus_common._matEuler(wrz, wry, wrx))
            mw = zeus_common._minv(self.matrix())
            m = zeus_common._mdotm(mw, mp)
        except Exception as e:
            raise CoordinateException(ERR_ZERO_COORDINATE, 3, "world2base", e)
        return [m[0][3], m[1][3], m[2][3]] + zeus_common._eulMatrix(m)

    def base2world(self, bx, by, bz, brz, bry, brx):
        try:
            mp = zeus_common._mdotm(zeus_common._matShift(bx, by, bz),
                                    zeus_common._matEuler(brz, bry, brx))
            mw = self.matrix()
            m = zeus_common._mdotm(mw, mp)
        except Exception as e:
            raise CoordinateException(ERR_ZERO_COORDINATE, 4, "base2world", e)
        return [m[0][3], m[1][3], m[2][3]] + zeus_common._eulMatrix(m)

    def copy(self):
        p = _Coordinate(self.pos[:])
        return p

    def clear(self):
        self.pos = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, _BASE]
        _Coordinate.__defpos = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, _BASE]

    def replace(self, *args, **kwargs):
        self.__param_coordinate(*args, **kwargs)
        return self

    def shift(self, *args, **kwargs):
        p = zeus_common._args([0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                              ['dx', 'dy', 'dz', 'drz', 'dry', 'drx'],
                              [float, float, float, float, float, float],
                              *args, **kwargs)
        if p[0] == False:
            raise CoordinateException(ERR_ZERO_COORDINATE, 1, "shift", p[1])
        else:
            self.pos[0:6] = [self.pos[i] + p[i + 1] for i in range(6)]
            return self

    def inv(self):
        try:
            m = zeus_common._minv(self.matrix())
            mw = [m[0][3], m[1][3], m[2][3]] + zeus_common._eulMatrix(m)
            p = _Coordinate(mw[0:6], _BASE)
        except Exception as e:
            raise CoordinateException(ERR_ZERO_COORDINATE, 5, "inv")
        return p


class _MotionParam(object):
    """
    MotionParam class
    Defines the robot motion parameters

    Methods:
        __param_mp(self, *args, **kwargs): parse parameters
        __list2mp(self, mp): store motion parameters given as a list into the MotionParam
        mp2list(self): return the motion parameters as a list
        mp2dict(self): return the motion parameters as a dictionary
        copy(self, *args, **kwargs): return a copy of the MotionParam object
        clear(self): reset the motion parameters
        set_default(self, *args, **kwargs): change the default motion parameter values
        set_motionparam(self, *args, **kwargs): set parameters
    """
    __default = [5.0, 5.0, 0.4, 0.4, 2, 2, 0.0, 100, 20.0, 0x11111111]

    def __init__(self, *args, **kwargs):
        """
        Create a MotionParam object

        Args:
            lin_speed (float): speed of straight-line motion in the Cartesian
                coordinate system, based on linear interpolation. [mm/s] Default:5.0
            jnt_speed (float): speed ratio for PTP motion in the Cartesian coordinate
                system and for joint motion. [%] Default:5.0
            acctime (float): time taken to accelerate to maximum speed. [sec] Default:0.4
            dacctime (float): time taken to decelerate to speed 0. [sec] Default:0.4
            posture (int): posture of the robot arm (0-7), defined by the arm position
                and the joint values. Default:2
            passm (int): pass-motion parameter. When ON, the wait time between
                consecutive motions is ignored. (ON:1, OFF:2) Default:2
            overlap (float): overlap-motion parameter. With asyncm enabled, the next
                target motion starts to overlap once the arm approaches within the
                configured distance of the current target. [mm] Default:0.0
            zone (int): settling range parameter. Positioning is judged complete once
                the target is reached within the configured number of pulses.
                [pulse] Default:100
            pose_speed (float): speed ratio for orientation-interpolated motion. [%]
                Default:20 (100% = 45deg/s)
            ik_solver_option (int): rotation direction flag for each joint [bitflag]
                Default:0x11111111
                0x11[0-3][0-3][0-3][0-3][0-3][0-3] : settings for J6-J1
                    0 : shortest path
                    1 : use the value set in the multiturn parameter
                    2 : rotate in the + direction
                    3 : rotate in the - direction
        """
        self.__list2mp(_MotionParam.__default)
        self.__param_mp(*args, **kwargs)

    def __param_mp(self, *args, **kwargs):
        p = zeus_common._args(self.mp2list(),
                              ['lin_speed', 'jnt_speed', 'acctime', 'dacctime', 'posture',
                               'passm', 'overlap', 'zone', 'pose_speed', 'ik_solver_option'],
                              [float, float, float, float, int, int, float, int, float, int],
                              *args, **kwargs)
        if p[0] == False:
            raise MotionParamException(ERR_ZERO_MOTIONPARAM, 1, "__param_mp", p[1])
        else:
            self.__list2mp(p[1:])
            return True

    def __list2mp(self, mp):
        res = zeus_common._chkparam(mp[0], p_type=[int, float], min=0.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.lin_speed = mp[0]
        res = zeus_common._chkparam(mp[1], p_type=[int, float], min=0.0, max=100.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.jnt_speed = mp[1]
        res = zeus_common._chkparam(mp[2], p_type=[int, float], min=0.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.acctime = mp[2]
        res = zeus_common._chkparam(mp[3], p_type=[int, float], min=0.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.dacctime = mp[3]
        res = zeus_common._chkparam(mp[4], p_type=int, min=-1, max=7)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.posture = mp[4]
        res = zeus_common._chkparam(mp[5], p_type=int, min=1, max=2)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.passm = mp[5]
        res = zeus_common._chkparam(mp[6], p_type=[int, float], min=0.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.overlap = mp[6]
        res = zeus_common._chkparam(mp[7], p_type=int, min=1)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.zone = mp[7]
        res = zeus_common._chkparam(mp[8], p_type=[int, float], min=1.0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.pose_speed = mp[8]
        res = zeus_common._chkparam(mp[9], p_type=int, min=0)
        if res[0] == False:
            raise MotionParamException(res[1], res[2], "__list2mp")
        else:
            self.ik_solver_option = mp[9]
        return True

    def mp2list(self):
        return [self.lin_speed, self.jnt_speed, self.acctime, self.dacctime, self.posture,
                self.passm, self.overlap, self.zone, self.pose_speed, self.ik_solver_option]

    def mp2dict(self):
        k = ['lin_speed', 'jnt_speed', 'acctime', 'dacctime', 'posture',
             'passm', 'overlap', 'zone', 'pose_speed', 'ik_solver_option']
        return dict(zip(k, self.mp2list()))

    def copy(self, *args, **kwargs):
        p = _MotionParam(self.mp2list()[:])
        p.set_motionparam(*args, **kwargs)
        return p

    def clear(self):
        try:
            self.__list2mp(_MotionParam.__default)
        except Exception as e:
            raise MotionParamException(ERR_ZERO_MOTIONPARAM, 2, "clear", e)
        return True

    def set_default(self, *args, **kwargs):
        p = zeus_common._args(_MotionParam.__default,
                              ['lin_speed', 'jnt_speed', 'acctime', 'dacctime', 'posture',
                               'passm', 'overlap', 'zone', 'pose_speed', 'ik_solver_option'],
                              [float, float, float, float, int, int, float, int, float, int],
                              *args, **kwargs)
        if p[0] == False:
            raise MotionParamException(ERR_ZERO_MOTIONPARAM, 1, "set_default", p[1])
        else:
            _MotionParam.__default = p[1:]
            return True

    def set_motionparam(self, *args, **kwargs):
try: self.__param_mp(*args, **kwargs) except Exception as e: raise MotionParamException(ERR_ZERO_MOTIONPARAM, 3, e) return True if __name__ == '__main__': try: # region 시작 rb = RobotClient("192.168.0.23", 12345) # open k = input('\nopen') print(rb.open()) # # _NOP # input('\n_NOP') # print(rb.nop()) # endregion #region Motion Planning 계열 # # _PTPPLAN # input('\n_PTPPLAN') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.ptpplan(110,-370,570,90,0,180,7,-1,50,0.4,0.4)) # # _CPPLAN # input('\n_CPPLAN') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.cpplan(130,-370,670,90,0,180,7,-1,50,0.4,0.4)) # # _PTPPLAN_W_SP # input('\n_PTPPLAN_W_SP') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.ptpplan_w_sp(90, -370, 570, 90,0,180,7,-1, 110,-370,570,90,0,180,7,-1,50,0.4,0.4)) # # _CPPLAN_W_SP # input('\n_CPPLAN_W_SP') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.cpplan_w_sp(120, -370, 570, 90,0,180,7,-1, 130,-370,670,90,0,180,7,-1,50,0.4,0.4)) # # _OPTCPPLAN # input('\n_OPTCPPLAN') # print(rb.optcpplan(110,-380,560, 80, 10, 170, 7, -1, 50, 0.4, 0.4)) # # _PTPPLAN_MT # input('\n_PTPPLAN_MT') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.ptpplan_mt(110,-370,570,90,0,180,7,-1,0xFF000000, 0x11111111,50,0.4,0.4)) # # _PTPPLAN_W_SP_MT # input('\n_PTPPLAN_W_SP_MT') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.ptpplan_w_sp_mt(90, -360, 550, 90,0,180,7,-1,0xFF000000, 0x11111111, 110,-370,570,90,0,180,7,-1,0xFF000000, 0x11111111,50,0.4,0.4)) # # _CPRPLAN # input('\n_CPRPLAN') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.cprplan(1,1,1,1,1,1,50,0.4,0.4)) # # _CPRPLAN_W_SP # input('\n_CPRPLAN_W_SP') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.cprplan_w_sp(100,-370,570,90,0,180,7,1,1,1,1,1,1,1,50,0.4,0.4)) #endregion # region MOVE 계열 # _PLSMOVE, _MTRMOVE, _JNTMOVE, _JNTRMOVE, _JNTRMOVE_WO_CHK, _PTPMOVE, _PTPMOVE_MT, # _CPMOVE, _CPRMOVE, _OPTCPMOVE, _TRMOVE, _ARCMOVE, _CIRMOVE # # _PLSMOVE # input('\n_PLSMOVE') # print(rb.plsmove(100,100,100,100,100,100,70,0.4,0.4)) # # # _MTRMOVE # input('\n_MTRMOVE 1') # print(rb.mtrmove(1,1,1,1,1,1,70,0.4,0.4)) # # # _JNTMOVE # input('\n_JNTMOVE') # print(rb.jntmove(0,0,90,0,90,0,70,0.4,0.4)) # # # _JNTRMOVE # input('\n_JNTRMOVE') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.jntrmove(1,1,1,1,1,1,50,0.4,0.4)) # # # _JNTRMOVE_WO_CHK # input('\n_JNTRMOVE_WO_CHK') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.jntrmove_wo_chk(1,1,1,1,1,1,50,0.4,0.4)) # w/o soft limit checking # # _PTPMOVE # input('\n_PTPMOVE') # print(rb.ptpmove(101,-371,571,91,-1,181,7,-1,70,0.4,0.4)) # # # _PTPMOVE_MT # input('\n_PTPMOVE_MT') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.ptpmove_mt(110,-380,560, 80, 10, 170, 7, -1, 0xFF000000, 0x11111111, 50, 0.4, 0.4)) # # # _CPMOVE # input('\n_CPMOVE') # print(rb.ptpmove(100,-370,570,90,-1,180,7,-1,70,0.4,0.4)) # # # _CPRMOVE # input('\n_CPRMOVE') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.cprmove(1,1,1,1,1,1,50,0.4,0.4)) # # # _OPTCPMOVE # input('\n_OPTCPMOVE') # print(rb.optcpmove(110,-380,560, 80, 10, 170, 7, -1, 50, 0.4, 0.4)) # # # _TRMOVE # input('\n_TRMOVE') # print(rb.trmove(1,1,1,1,1,1,70,0.4,0.4)) # # # _ARCMOVE # (경유점, 끝점) # input('\n_ARCMOVE') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # print(rb.arcmove(200,-420,570,90,0,180,7,-1,300,-370,570,90,0,180,7,1,100,0.4,0.4,0)) # # # _CIRMOVE # 시작점으로부터 두 점을 지나는 원 # input('\n_CIRMOVE') # print(rb.jntmove(0,0,90,0,90,0,50,0.4,0.4)) # 
print(rb.cirmove(150,-370,570,90,0,180,7,-1,100,-320,570,90,0,180,7,1,100,0.4,0.4,0)) # endregion # region파라미터 설정, 큐 관리 계열 # # _SET_TOOL # input('\n_SET_TOOL') # print(rb.settool(1,1,1,1,1,1,1)) # # # _CHANGE_TOOL # input('\n_CHANGE_TOOL 1') # print(rb.changetool(1)) # # input('\n_CHANGE_TOOL 0') # print(rb.changetool(0)) # # # _ASYNCM # input('\n_ASYNCM ON') # print(rb.asyncm(1)) # input('\n_ASYNCM READ') # print(rb.asyncm(0)) # # input('\n_ASYNCM OFF') # print(rb.asyncm(2)) # input('\n_ASYNCM READ') # print(rb.asyncm(0)) # # # _PASSM # input('\n_PASSM ON') # print(rb.passm(1)) # input('\n_PASSM READ') # print(rb.passm(0)) # # input('\n_PASSM OFF') # print(rb.passm(2)) # input('\n_PASSM READ') # print(rb.passm(0)) # # # _OVERLAP # input('\n_OVERLAP') # print(rb.overlap(30)) # # # _IOCTRL # input('\n_IOCTRL WRITE') # print(rb.ioctrl(130, 0x00000000, 0x0000ffff, 0, 0xffffffff)) # # input('\n_IOCTRL READ') # print(rb.ioctrl(130, 0, 0xffffffff, 0, 0xffffffff)) # # input('\n_IOCTRL WRITE') # print(rb.ioctrl(130, 0xffff0000, 0x0000ffff, 0, 0xffffffff)) # # input('\n_IOCTRL READ') # print(rb.ioctrl(130, 0, 0xffffffff, 0, 0xffffffff)) # # # _ZONE # input('\n_ZONE') # print(rb.zone(100)) # # # _SLSPEED # input('\n_SLSPEED') # print(rb.slspeed(5)) # print(rb.jntmove(0,0,90,0,90,0,70,0.4,0.4)) # ts = time.time() # print(rb.cprmove(0,0,0,10,10,10,100,0.4,0.4)) # print('tact time:', time.time()-ts) # # print(rb.slspeed(100)) # print(rb.jntmove(0,0,90,0,90,0,70,0.4,0.4)) # ts = time.time() # print(rb.cprmove(0, 0, 0, 10, 10, 10, 100, 0.4, 0.4)) # print('tact time:', time.time()-ts) # # # _ABORTM # input('\n_ABORTM') # rb.asyncm(1) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # rb.jntmove(0,0,90,0,90,0,10,0.4,0.4) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # cnt=0 # while cnt < 300: # cnt+=1 # print('queue:', rb.syssts(5)[1]) # time.sleep(0.01) # print(rb.abortm()) # rb.asyncm(2) # # # _JOINM # input('\n_JOINM') # rb.asyncm(1) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # rb.jntmove(0,0,90,0,90,0,10,0.4,0.4) # print(rb.joinm()) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # rb.asyncm(2) # print() # # # _SUSPENDM, _RESUMEM : 확인이 어려움 #input('\n_SUSPENDM') # # _SET_MDO # input('\n_SET_MDO') # print(rb.set_mdo(1, 16, 1, 1, 30)) # print(rb.set_mdo(2, 16, 0, 2, 30)) # input('\_ENABLE_MDO') # print(rb.enable_mdo(3)) # print(rb.jntmove(0, 0, 0, 0, 0, 0, 10, 0.4, 0.4)) # print(rb.jntmove(0, 0, 30, 0, 30, 0, 10, 0.4, 0.4)) # print(rb.jntmove(0, 0, 0, 0, 0, 0, 10, 0.4, 0.4)) # print(rb.jntmove(0, 0, 30, 0, 30, 0, 10, 0.4, 0.4)) # input('\n_DISABLE_MDO') # print(rb.disable_mdo(3)) # # # _FSCTRL # PRDEF setting is needed. # input('\n_FSCTRL') # print(rb.fsctrl(1)) # print(rb.fsctrl(2)) # print(rb.fsctrl(3)) # print(rb.fsctrl(4)) # print(rb.fsctrl(5)) # endregion # region MARK 계열 # # _MARK # input('\n_MARK') # print(rb.mark()) # # # _JMARK # input('\n_JMARK') # print(rb.jmark()) # # # _PMARK # input('\n_PMARK') # rb.asyncm(1) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # rb.jntmove(0,0,90,0,90,0,10,0.4,0.4) # rb.jntmove(0,0,0,0,0,0,10,0.4,0.4) # while rb.syssts(5)[1] !=0: # print('curpos :', rb.pmark(3)) # print('cmd :', rb.pmark(2)) # print('fb :', rb.pmark(1)) # print('goal:',rb.pmark(4)) # time.sleep(0.1) # print(rb.abortm()) # rb.asyncm(2) # # # _MARK_MT # input('\n_MARK_MT') # print(rb.mark_mt()) # # # _MMARK # MCS 버전 업데이트 필요. 
모터 값 확인 # input('\n_MMARK') # rb.asyncm(2) # print(rb.mmark()) # endregion # region 변환 계열 # # _J2R # input('\n_J2R') # print(rb.j2r(0,0,90,0,90,0,-1)) # # # _R2J # input('\n_R2J') # print(rb.r2j(100,-370,570,90,0,180,3,-1,0xFF000000, 0x11111111)) # input('\n_R2J') # print(rb.r2j(100,-370,570,90,0,180,3,-1,0x00FFFFFF, 0x11111111)) # input('\n_R2J') # print(rb.r2j(100,-370,570,90,0,180,3,-1,0x00FFFFFF, 0x11000000)) # input('\n_R2J') # print(rb.r2j(100,-370,570,90,0,180,3,-1,0x00111111, 0x11111111)) # input('\n_R2J') # print(rb.r2j(100,-370,570,90,0,180,3,-1,0x00111111, 0x11000000)) # # # _J2R_MT # input('\n_J2R_MT') # print(rb.j2r_mt(0,0,90,0,90,0,-1)) # # # _R2J_MT # input('\n_R2J_MT') # print(rb.r2j_mt(100,-370,570,90,0,180,3,-1,0xFF000000, 0x11111111)) # input('\n_R2J') # print(rb.r2j_mt(100,-370,570,90,0,180,3,-1,0x00FFFFFF, 0x11111111)) # input('\n_R2J') # print(rb.r2j_mt(100,-370,570,90,0,180,3,-1,0x00FFFFFF, 0x11000000)) # input('\n_R2J') # print(rb.r2j_mt(100,-370,570,90,0,180,3,-1,0x00111111, 0x11111111)) # input('\n_R2J') # print(rb.r2j_mt(100,-370,570,90,0,180,3,-1,0x00111111, 0x11000000)) # endregion # region 시스템 설정, 모니터링 계열 # # _VERSION # input('\n_VERSION') # print(rb.version()) # # # _ENCRST # Encoder Reset (enc_reset.py에서 호출됨) can't execute on simulation # input('\n_ENCRST') # print(rb.encrst(63)) # # # _SAVEPARAMS # HWDEF 파일 갱신(?) # can't execute on simulation # input('\n_SAVEPARAMS') # print(rb.saveparams()) # # # _CALCPLSOFFSET # rb.jntmove(0, 0, 90, 0, 90, 0, 10, 0.4, 0.4) # input('\n_CALCPLSOFFSET 1') # print(rb.calcplsoffset(1,63)) # input('\n_CALCPLSOFFSET 2') # print(rb.calcplsoffset(2, 63)) # input('\n_CALCPLSOFFSET 3') # print(rb.calcplsoffset(3, 63)) # input('\n_CALCPLSOFFSET 4') # print(rb.calcplsoffset(4, 63)) # input('\n_CALCPLSOFFSET 5') # print(rb.calcplsoffset(5, 63)) # input('\n_CALCPLSOFFSET 6') # print(rb.calcplsoffset(6, 63)) # can't execute on simulation # # # _SET_LOG_LEVEL # input('\n_SET_LOG_LEVEL -1') # print(rb.set_log_level(-1)) # input('\n_SET_LOG_LEVEL 4') # print(rb.set_log_level(4)) # input('\n_SET_LOG_LEVEL 3') # print(rb.set_log_level(3)) # input('\n_SET_LOG_LEVEL 2') # print(rb.set_log_level(2)) # input('\n_SET_LOG_LEVEL 1') # print(rb.set_log_level(1)) # input('\n_SET_LOG_LEVEL 0') # print(rb.set_log_level(0)) # # # _SYSCTRL # input('\n_SYSCTRL 1 0') # print(rb.sysctrl(1, 0)) # input('\n_SYSCTRL 1 1') # print(rb.sysctrl(1, 1)) # input('\n_SYSCTRL 0x8000 0xdead') # print(rb.sysctrl(0x8000, 0xdead)) # input('\n_SYSCTRL 0x8000 0xeeee') # print(rb.sysctrl(0x8000, 0xeeee)) # input('\n_SYSCTRL 0x8000 0x8001') # print(rb.sysctrl(0x8000, 0x8001)) # # # _GETMT # Need to check how to use # input('\n_GETMT') # print(rb.getmt()) # # # _SVSW # input('\n_SVSW off') # print(rb.svctrl(2)) # input('\n_SVSW on') # print(rb.svctrl(1)) # input('\n_SVSW off') # print(rb.svctrl(2)) # # # _RELBRK # can't execute on simulation # input('\n_RELBRK') # print(rb.relbrk(0x111111)) # # # _CLPBRK # can't execute on simulation. 
강제 브레이크 # input('\n_CLPBRK') # print(rb.clpbrk(0x111111)) # # # _SYSSTS # input('\n_SYSSTS 0') # print(rb.syssts(0)[1]) # input('\n_SYSSTS 1') # print(rb.syssts(1)) # input('\n_SYSSTS 2') # print(rb.syssts(2)) # input('\n_SYSSTS 3') # print(rb.syssts(3)) # input('\n_SYSSTS 4') # print(rb.syssts(4)) # input('\n_SYSSTS 5') # print(rb.syssts(5)) # input('\n_SYSSTS 6') # print(rb.syssts(6)) # input('\n_SYSSTS 7') # print(rb.syssts(7)) # input('\n_SYSSTS 8') # print(rb.syssts(8)) # # # _SETVENV # input('\n_SETVENV') # print(rb.setvenv()) # # endregion # region 종료 k = input('\nclose') res = rb.close() print(res) # endregion except Exception as e: print(e) try: rb.close() except Exception: pass
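The motion-parameter plumbing above is easier to see in use. The sketch below is illustrative only: the import path is inferred from this record's package/path fields and is not verified, and _MotionParam is an internal (underscore-prefixed) class, so the public API may expose it differently.

# Hypothetical usage sketch of _MotionParam -- import path assumed, not verified.
from _zeusrobot.zeus_rblib import _MotionParam

mp = _MotionParam(lin_speed=20.0, jnt_speed=10.0)  # remaining fields keep defaults
print(mp.mp2dict()['lin_speed'])                   # -> 20.0
mp2 = mp.copy(acctime=0.2)                         # clone, overriding one field
print(mp.acctime, mp2.acctime)                     # -> 0.4 0.2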
zeusrobot
/_zeusrobot/zeus_rblib.py
zeus_rblib.py
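Before moving on to zeus_common.py, here is a short sketch of the offset/shift distinction documented in the teaching-point classes above: offset copies first and leaves the receiver untouched, while shift mutates in place. As before, the import path and direct use of the internal _Position class are assumptions made for illustration.

# Hypothetical usage sketch of _Position.offset vs. _Position.shift.
from _zeusrobot.zeus_rblib import _Position

p = _Position(100.0, -370.0, 570.0, 90.0, 0.0, 180.0)
q = p.offset(dx=10.0)          # returns a shifted clone; p is untouched
p.shift(dz=5.0)                # shifts p itself and returns it
print(p.pos2list()[:3])        # -> [100.0, -370.0, 575.0]
print(q.pos2list()[:3])        # -> [110.0, -370.0, 570.0]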
import math import socket import threading import json import os import sys import builtins import inspect import datetime from numpy import binary_repr from .zeus_error import * # # for module check # from zeus_error import * # region print문 오버로딩 def print(*args, **kwargs): # print to file frm = inspect.stack()[-1] # 최상위 호출자의 프레임 정보를 가져옴 mod = inspect.getmodule(frm[0]) # 모듈 정보를 가져옴 filename = inspect.getsourcefile(mod).split('/')[-1] # 모듈의 소스 파일명을 가져옴 logname = "D:/PythonWork/ZSP/ZSP_PC/Logs/userProgram/"+ filename + '_output.txt' # 로그 파일명 with open(logname, 'a', encoding='utf-8') as fout: current_time = datetime.datetime.now().strftime("[%Y/%m/%d %H:%M:%S]") if "ZeusRobot" in str(frm[-2]): # ZeusRobot import 시 호출된 print 문에 대한 별도 처리 필요한 경우 아래에 추가 pass builtins.print(current_time, end=' ', file=fout) # 타임 스탬프 기록 builtins.print(*args, **kwargs, file=fout) # orginal print builtins.print(*args, **kwargs) # endregion # region Matrix, Vector 연산 def _matI(_n): """ 단위 행렬 생성 Args: _n (int): 단위 행렬의 차수 Returns: list : nxn 단위 행렬 """ return [[1.0 if i == j else 0.0 for j in range(_n)] for i in range(_n)] def _matEuler(rz, ry, rx): """ ZYX-오일러 변환 행렬을 계산 Args: rz: Roll ry: Pitch rx: Yaw Returns: list : 4x4 오일러 변환 행렬 """ cz = math.cos(math.radians(rz)) sz = math.sin(math.radians(rz)) cy = math.cos(math.radians(ry)) sy = math.sin(math.radians(ry)) cx = math.cos(math.radians(rx)) sx = math.sin(math.radians(rx)) return [[cy * cz, sx * sy * cz - cx * sz, sx * sz + cx * sy * cz, 0.], [cy * sz, sx * sy * sz + cx * cz, cx * sy * sz - sx * cz, 0.], [-sy, sx * cy, cx * cy, 0.], [0., 0., 0., 1.]] def _minv(_m): """ 역행렬 계산 Args: _m (list): nxn 정방 행렬 Returns: list : 행렬 연산 결과 """ n = len(_m) # 행렬의 차원 for i in range(n): # 행렬 크기 확인 (nxn) if not len(_m[i]) == n: raise Exception("matrix error") m = [[float(_m[i][j]) for j in range(n)] + [1.0 if j == i else 0.0 for j in range(n)] \ for i in range(n)] # pivot 선택 (대각에 위치한 원소) for i in range(n): t = [abs(m[k][i]) for k in range(i, n)] m[i], m[t.index(max(t)) + i] = m[t.index(max(t)) + i], m[i] m[i] = [m[i][k] / m[i][i] for k in range(2 * n)] for j in range(i + 1, n): m[j] = [m[j][k] - m[i][k] * m[j][i] for k in range(2 * n)] for i in reversed(range(n)): for j in range(0, i): m[j] = [m[j][k] - m[i][k] * m[j][i] for k in range(2 * n)] return _mslice(m, 0, n, n, 2 * n) def _mdotm(_m1, _m2): """ 두 행렬의 내적 연산 (dot product) Args: _m1 (list): nxm 행렬 _m2 (list): mxl 행렬 Returns: list : nxl 행렬 """ return [[_vdotv(_m1[r], [_m2[i][c] for i in range(len(_m2))]) for c in range(len(_m2[0]))] for r in range(len(_m1))] def _mdotv(_m, _v): """ 행렬과 벡터의 내적 연산 (dot product) Args: _m (list): nxm 행렬 _v (list): mx1 벡터 Returns: list : nx1 행렬(벡터) """ return [_vdotv(_m[r], _v) for r in range(len(_m))] def _vdotv(_a, _b): """ 벡터와 벡터의 내적 연산 (dot product) Args: _a (list): nx1 벡터 _b (list): nx1 벡터 Returns: float : 내적 연산 결과 값 """ k = 0.0 for i in range(len(_a)): k += _a[i] * _b[i] return k def _vcrossv(_a, _b): """ 벡터와 벡터의 외적 연산 (cross product) Args: _a (list): 3x1 벡터 _b (list): 3x1 벡터 Returns: list : 외적 연산 결과 행렬 """ return [_a[1] * _b[2] - _a[2] * _b[1], _a[2] * _b[0] - _a[0] * _b[2], _a[0] * _b[1] - _a[1] * _b[0]] def _vabs(v): """ 벡터의 크기 계산 Args: v (list): 3x1 벡터 Returns: float : 벡터의 크기 """ return math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) def _vnorm(v): """ 단위 벡터 취득 Args: v (list): 3x1 벡터 Returns: list : 단위 벡터 """ a = _vabs(v) return [v[0] / a, v[1] / a, v[2] / a] def _vadd(v1, v2): """ 벡터의 합을 계산 Args: v1 (list): [v1,v2,v3] 벡터 v2 (list): [v1,v2,v3] 벡터 Returns: list : 벡터 연산 결과 """ return [v1[k] + v2[k] 
for k in range(len(v1))] def _vsub(v1, v2): """ 벡터의 차이를 계산 Args: v1 (list): [v1,v2,v3] 벡터 v2 (list): [v1,v2,v3] 벡터 Returns: list : 벡터 연산 결과 """ return [v1[k] - v2[k] for k in range(len(v1))] def _mslice(_m, _r0, _r1, _c0, _c1): """ 행렬을 분리 Args: _m: 분리하고자 하는 행렬 _r0 (int): 시작 행 _r1 (int): 끝 행 _c0 (int): 시작 열 _c1 (int): 끝 열 Returns: list : 지정된 범위의 행렬 요소를 반환 """ return [k[_c0:_c1] for k in _m[_r0:_r1]] def _eulMatrix(m): """ Affine 변환에서의 오일러 각 Args: m (list): 행렬 Returns: list : 오일러 각 3x1 행렬 """ M_PI = 3.1415926535897 M_PI_2 = M_PI / 2. ry = math.asin(-m[2][0]) if math.fabs(ry - M_PI_2) < 0.0000001: rz = math.atan2(m[1][1], m[0][1]) - M_PI_2 ry = M_PI_2 rx = 0.0 elif math.fabs(ry + M_PI_2) < 0.0000001: rz = math.atan2(m[1][1], m[0][1]) - M_PI_2 ry = -M_PI_2 rx = 0.0 else: rz = math.atan2(m[1][0], m[0][0]) rx = math.atan2(m[2][1], m[2][2]) return [math.degrees(rz), math.degrees(ry), math.degrees(rx)] def _matShift(_x, _y, _z): """ 평행 이동 Affine 변환 행렬 생성 Args: _x (float): 평행이동 x값 _y (float): 평행이동 y값 _z (float): 평행이동 z값 Returns: list : 평행이동 Affine 변환 행렬 """ return [ [1., 0., 0., _x], [0., 1., 0., _y], [0., 0., 1., _z], [0., 0., 0., 1.] ] def _matRotate(n, o, a): """ 3개의 벡터에 대한 회전 변환 행렬 생성 Args: n (list): 변환된 강체 좌표계 x축 방향 벡터 (3x1) [n_x, n_y, n_z] o (list): 변환된 강체 좌표계 y축 방향 벡터 (3x1) [o_x, o_y, o_z] a (list): 변환된 강체 좌표계 z축 방향 벡터 (3x1) [a_x, a_y, a_z] Returns: list : 회전 변환 행렬 """ return [ [n[0], o[0], a[0], 0], [n[1], o[1], a[1], 0], [n[2], o[2], a[2], 0], [0, 0, 0, 1] ] # endregion # region Bitflag, Argument, Parameter def _bitflag(val, bit): """ val을 이진수료 표현하고, bit에 해당하는 자리를 읽음 Args: val (int): 10진수 bit (int): 반환하고자 하는 비트 자리 수 Returns: bool : 0이면 False, 1이면 True """ if type(bit) != int: raise Exception("Error : zeus_common._bitflag : Invalid Parameter") return bool((val & (1 << bit))) def _args(val, key, typ, *args, **kwargs): """ Argument 설정 Args: val (list): Argument 데이터 형태 key (list): Key 값 typ (list): 데이터 타입 *args: 리스트 형태의 설정 값 **kwargs: 딕셔너리 형태의 설정 값 Returns: list : 설정된 Argument 리스트 반환 """ _v = dict(zip(key, val)) _t = dict(zip(key, typ)) i = 0 def sub(v, k): if _t[k] == int: if type(v) != int: raise ValueError _v[k] = int(v) elif _t[k] == float: _v[k] = float(v) elif _t[k] == str: _v[k] = str(v) elif _t[k] == list: res = False for k in _t[k]: res = res or (type(v) == k) elif _t[k] == None: _v[k] = v else: if type(v) == _t[k]: _v[k] = v else: raise ValueError try: for a in args: if type(a) == list: for b in a: if i < len(key): sub(b, key[i]) i += 1 else: if i < len(key): sub(a, key[i]) i += 1 for k, v in kwargs.items(): sub(v, k) except Exception as e: return [False, f"Error : zeus_common._args : {e}"] else: return [True] + [_v[j] for j in key] def _chkparam(pram, **criteria): """ 파라미터의 타입 일치 여부 확인 Args: pram: 확인하고자 하는 파라미터 변수 **criteria: p_type(파라미터 타입), min(최소값), max(최대값) Returns: bool: 파라미터 타입 일치 여부 """ if "p_type" in criteria: t = criteria["p_type"] res = False if isinstance(t,list): for k in t: res = res or isinstance(pram,k) elif isinstance(t, type): res = (type(pram) == criteria["p_type"]) if res == False: return [False, 1] if isinstance(pram,int) or isinstance(pram,float): if "min" in criteria: if pram < criteria["min"]: return [False, 2] if "max" in criteria: if pram > criteria["max"]: return [False, 2] return [True] # endregion class _SharedMemory(object): """ SharedMemory 클래스 공유 메모리 읽기/쓰기 기능 수행 Methods: open(self): 공유메모리 통신을 위한 소켓 연결 close(self): 공유메모리 통신을 위한 소켓 연결 종료 shm_read(self, addr, num): 공유메모리의 특정 주소 데이터 취득 shm_write(self, addr, num): 공유메모리의 유저 영역 주소에 데이터 기록 
shm_system_write(self, addr, num): 공유메모리의 시스템 영역 주소에 데이터 기록 """ _CHECK = 0 _SHM_READ = 1 _SHM_WRITE = 2 _SHM_SYSTEM_WRITE = 3 # _SHM_SYSTEM_WRITE Address _SYSINFO_ADDR1 = 0x0800 _SYSINFO_ADDR2 = 0x0844 def __init__(self, host = "192.168.0.23", port = 8000): self._host = host self._port = port self._sock = None self._lock = threading.Lock() def open(self): try: if self._sock is None: self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._sock.settimeout(1) self._sock.connect((self._host, self._port)) self._sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000)) except Exception as e: raise SharedMemoryException(ERR_ZERO_SHM, 3, "open", e) return True def close(self): try: if self._sock is not None: self._sock.close() self._sock = None except Exception as e: raise SharedMemoryException(ERR_ZERO_SHM, 4,"close", e) return True def shm_read(self, addr, num): """ Shared Memory 의 데이터를 읽음 Args: addr(int): Shared Memory의 index (0x0100-0x3800) num(int, list[int]): addr 부터 읽고자 하는 데이터 개수 (int) Returns: str : comma로 구분된 string """ res = _chkparam(addr, p_type=int, min=0x0100, max=0x3800) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 2,"shm_read") res = _chkparam(num, p_type=int, max=256) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 1,"shm_read") params = {'cmd': self._SHM_READ, 'params': [int(addr), int(num)]} try: self._lock.acquire() self._sock.send(json.dumps(params).encode('ascii')) buf = self._sock.recv(1024).decode().split(',') self._lock.release() except Exception as e: raise SharedMemoryException(ERR_ZERO_SHM, 5, "shm_read", e) if buf[1] == "": raise SharedMemoryException(ERR_ZERO_SHM, 2, "shm_read", f"Cannot Read from Address 0x{format(addr, '04x')}") return buf def shm_write(self, addr, num): """ Shared Memory 에 데이터 쓰기 Args: addr(int): Shared Memory의 index (0x1800-0x23F8) 0x1800-0x1BFC : 4-byte integer 데이터 저장 메모리 영역 (0x1800, 0x1804, 0x1808 ...) 0x1C00-0x23F8 : 8-byte float 데이터 저장 메모리 영역 (0x1C00, 0x1C08, 0x1C10 ... 
(0x2400-0x2800은 Reserved Area로, 사용 제한) num(int or float, list[int or float]): addr에 쓰고자 하는 데이터 (int/float 혹은 이를 원소로 갖는 list 또는 tuple) Returns: """ res = _chkparam(addr, p_type=int, min=0x1800, max=0x1BFC) if res[0] == True: # int 범위에 해당하는 경우 if addr % 4 != 0: raise SharedMemoryException(ERR_ZERO_SHM, 2, "shm_write") res = _chkparam(num, p_type=list) # 리스트인지 체크 if res[0] == True: # 리스트이면 if len(num) > 256: # 리스트 길이 체크 raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write", "limit of list length is 256") for i in range(len(num)): # 리스트 길이 만큼 체크 반복 res = _chkparam(num[i], p_type=int) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write") else: # 리스트가 아니면 res = _chkparam(num, p_type=int) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write", "limit of list length is 256") else: res = _chkparam(addr, p_type=int, min=0x1C00, max=0x23F8) if res[0] == True: # float 범위에 해당하는 경우 if addr % 8 != 0: raise SharedMemoryException(ERR_ZERO_SHM, 2, "shm_write") res = _chkparam(num, p_type=list) # 리스트인지 체크 if res[0] == True: # 리스트이면 if len(num) > 256: # 리스트 길이 체크 raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write", "limit of list length is 256") for i in range(len(num)): # 리스트 길이 만큼 체크 반복 res = _chkparam(num[i], p_type=[int,float]) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write") else: # 리스트가 아니면 res = _chkparam(num, p_type=[int,float]) if res[0] == False: raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write") else: res = _chkparam(addr, p_type=int, min=0x2400, max=0x27FC) if res[0] == True: raise SharedMemoryException(ERR_ZERO_SHM, 2, "shm_write", "Reserved Area") else: raise SharedMemoryException(ERR_ZERO_SHM, 1, "shm_write") params = {'cmd': self._SHM_WRITE, 'params': [int(addr), num]} try: self._lock.acquire() self._sock.send(json.dumps(params).encode('ascii')) buf = self._sock.recv(1024).decode() self._lock.release() except Exception as e: raise SharedMemoryException(ERR_ZERO_SHM, 6, "shm_write", e) return buf def shm_system_write(self, addr, num): """ 시스템 메모리 Write (System 관리 용도. 
사용 금지) Args: addr(int): _SYSINFO_ADDR1 : 0x0800 _SYSINFO_ADDR2 : 0x0844 num(list): _SYSINFO_ADDR1 인 경우, [prog_name_default, prog_name, prog_pid] [str, str, int] _SYSINFO_ADDR2 인 경우, [run, stop, err_reset, pause, running, svon, emo, hw_error, sw_error, abs_lost, in_pause, error] (총 12개, int 타입) Returns: """ if not (addr == self._SYSINFO_ADDR1 or addr == self._SYSINFO_ADDR2): raise SharedMemoryException(ERR_ZERO_SHM, 2, "shm_system_write", f"Error : _SharedMemory.shm_read : Cannot Write on Address 0x{format(addr, '04x')}" f"\nAccessable Address : 0x{format(self._SYSINFO_ADDR1, '04x')}(_SYSINFO_ADDR1) " f"/ 0x{format(self._SYSINFO_ADDR2, '04x')}(_SYSINFO_ADDR2)" ) params = {'cmd': self._SHM_SYSTEM_WRITE, 'params': [int(addr), num]} try: self._lock.acquire() self._sock.send(json.dumps(params).encode('ascii')) buf = self._sock.recv(1024).decode() self._lock.release() except Exception as e: raise SharedMemoryException(ERR_ZERO_SHM, 6, "shm_system_write", e) return buf if __name__ == "__main__": shm = _SharedMemory() shm.open() try: r = shm.shm_system_write(0x0101,1) except Exception as e: print(e) shm.close() # while True: # input("close") # shm.close() # input("open") # shm.open() # shm.shm_write(0x1800, 0) # time.sleep(0.01) # shm.shm_read(0x1800,6) # time.sleep(0.01) # shm.shm_write(0x1800, [1,6]) # time.sleep(0.01) # shm.shm_read(0x1800,6) # print(shm.shm_read(0x0120, 8)) # time.sleep(0.5) # print(shm.shm_write(0x1C00, 100000.123456)) # time.sleep(0.5) # ret=shm.shm_read(0x1C00, 1) # print(ret, type(ret)) # time.sleep(0.5) # # print(shm.shm_write(0x1C01, 100000.123456)) # time.sleep(0.5) # print(shm.shm_read(0x1C00, 1)) # time.sleep(0.5) # print(shm.shm_read(0x1C01, 8)) # # print(shm.shm_write(0x1C08, [4,4.4,3, 3.3, 2, 2.2, 1, 1.1])) # time.sleep(0.5) # print(shm.shm_read(0x1C00, 8)) # while True: # shm.shm_write(0x1800, 0) # time.sleep(0.01) # shm.shm_read(0x1800, 6) # time.sleep(0.01) # shm.shm_write(0x1800, [1,6]) # time.sleep(0.01) # shm.shm_read(0x1800,6) # time.sleep(0.01) # shm.shm_system_write(shm._SYSINFO_ADDR1, ["", "ShmServer.py",0]) # time.sleep(0.01) # print(_matI(4)) # print(_minv([[1,2,3],[4,5,6],[7,8,9]])) # print(_bitflag(255, 1)) # print(_bitflag(255, 8))
zeusrobot
/_zeusrobot/zeus_common.py
zeus_common.py
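The pose math in zeus_common.py composes a translation matrix with a Z-Y-X Euler rotation and recovers the angles via _eulMatrix. The standalone sketch below mirrors that convention so the round trip can be checked without the package; the function names are local to the example, only the 3x3 rotation block of the 4x4 matrix is shown, and the singular ry = +/-90 deg cases that _eulMatrix special-cases are avoided.

import math

# Standalone sketch of the Z-Y-X Euler convention used by _matEuler/_eulMatrix.
def mat_euler(rz, ry, rx):
    cz, sz = math.cos(math.radians(rz)), math.sin(math.radians(rz))
    cy, sy = math.cos(math.radians(ry)), math.sin(math.radians(ry))
    cx, sx = math.cos(math.radians(rx)), math.sin(math.radians(rx))
    return [[cy * cz, sx * sy * cz - cx * sz, sx * sz + cx * sy * cz],
            [cy * sz, sx * sy * sz + cx * cz, cx * sy * sz - sx * cz],
            [-sy,     sx * cy,                cx * cy]]

def eul_matrix(m):
    # Inverse mapping; valid away from the ry = +/-90 deg singularity.
    ry = math.asin(-m[2][0])
    rz = math.atan2(m[1][0], m[0][0])
    rx = math.atan2(m[2][1], m[2][2])
    return [math.degrees(a) for a in (rz, ry, rx)]

print(eul_matrix(mat_euler(30.0, 20.0, 10.0)))  # ~[30.0, 20.0, 10.0]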
import os import sys import mmap import tempfile import struct import threading from typing import Any,Union,Tuple,List from .zeus_shared_memory_table import ZeusShmData from .zeus_shared_memory_format import _shm_format, _shm_num_data shm_format, shm_num_data = _shm_format, _shm_num_data class ZeusShm: u"""Shared memory class for Windows""" _ZeusShmTable = ZeusShmData.ZeusShmTable _ZeusShmSize:int = ZeusShmData.ZeusShmSize _debug_out = False @classmethod def log(cls, *msg:Any) -> None: """Debug output """ if cls._debug_out: print(*msg) def __init__(self, is_readonly:bool = True) -> None: """ Args: is_readonly (bool, optional): Set False if write function is required. Defaults to True. Remark: Only system manager should use 'is_readonly = False' condition. """ self.fp = None self.mm: Union[mmap.mmap, None] = None self.update_counter = 0 self.lock = threading.Lock() self.is_read_only = is_readonly if os.name == "nt": tempdir = tempfile.gettempdir() else: tempdir = "/dev/shm" path = ".ZeusShmData" self.filepath = tempdir + os.sep + path self.log("* Shared file path:{}".format(self.filepath)) self.log("* Shared memory size: {} bytes".format(self._ZeusShmSize)) if is_readonly: self.fp = open(self.filepath, 'rb') self.mm = mmap.mmap(self.fp.fileno(), 0, access=mmap.ACCESS_READ) else: # Erase old data if not os.path.isfile(self.filepath) or os.path.getsize(self.filepath)!=self._ZeusShmSize: self.log("* Recreate shared memory file.") if os.path.isfile(self.filepath): os.remove(self.filepath) self.fp = open(self.filepath, 'wb') self.fp.write(bytes([0]*self._ZeusShmSize)) self.fp.close() self.fp = open(self.filepath, 'r+b') self.fp.seek(0) self.mm = mmap.mmap(self.fp.fileno(), 0, access=mmap.ACCESS_WRITE) self.log("{} success".format(sys._getframe().f_code.co_name)) def __del__(self) -> None: self._close() def _close(self) -> None: if self.mm is not None: self.mm.close() del self.mm self.mm = None if self.fp is not None: self.fp.close() del self.fp self.fp = None self.log("{}".format(sys._getframe().f_code.co_name)) def _search_index(self, address:int) -> int: find_idx = -1 for idx in range(len(self._ZeusShmTable)): item = self._ZeusShmTable[idx] if item[0] == address: find_idx = idx break # for idx, item in enumerate(self._ZeusShmTable): # if item[0] == address: # find_idx = idx # break if find_idx == -1: raise ValueError("Invalid address:0x{:04X}".format(address)) return find_idx def _prepare_extract(self, begin_idx:int, n:int) -> Tuple[int,str,int]: target_size = 0 pack_str = "<" for i in range(n): if begin_idx + i >= len(self._ZeusShmTable): break adr,size,_,unpack_ch,_ = self._ZeusShmTable[begin_idx + i] target_size += int(size) pack_str += unpack_ch return (target_size, pack_str) def _read_binary(self, begin_idx:int, size:int) -> bytes: if self.mm is None: return b"" self.mm.seek(self._ZeusShmTable[begin_idx][2]) byte_data = self.mm.read(size) #print("read all data:{}:{}".format(size,byte_data)) return byte_data def _write_binary(self, begin_idx:int, data:bytes) -> None: if self.mm is None: return self.mm.seek(self._ZeusShmTable[begin_idx][2]) self.mm.write(data) def _clear_all(self) -> None: """Clear all memory""" #print("_clear_all ({})".format(self.is_read_only), flush=True) if self.is_read_only: raise PermissionError("Current permission is ReadOnly.") zero_array = [0]*self._ZeusShmSize zero_data = bytes(zero_array) self._write_binary(0,zero_data) def read(self, address:int, n:int = 1) -> List[Any]: """Read data array from shared memory Args: address(int): Start address to write n(int): 
number of data (Default value is 1) Returns: List[Any]: Data array Remark: Read function is supposed to be used by any process. """ if n<=0: raise IndexError("Invalid number value:{}".format(n)) begin_idx = self._search_index(address) target_size, pack_str= self._prepare_extract(begin_idx,n) byte_data = self._read_binary(begin_idx,target_size) # print("read_binary idx={},target_size={},pack_str={},n={},len={}".format( # begin_idx,target_size,pack_str,n,len(byte_data))) result = struct.unpack(pack_str,byte_data) result = [ x.rstrip(b'\x00').decode() if isinstance(x,bytes) else x for x in result] self.log("{}({},{})={}".format(sys._getframe().f_code.co_name,address,n,result)) return result def write(self, address:int, data_list:List[Any]) -> None: """Write data array to shared memory Args: address(int): Start address to write data_list(List[Any]):Data array to be written Remark: Write function is supposed to be used by only system manager. """ if self.is_read_only: raise PermissionError("Current permission is ReadOnly.") if type(data_list) != list: raise TypeError("data_list must be list.") n = len(data_list) if n == 0: raise ValueError("data_list is empty.") begin_idx = self._search_index(address) _, pack_str = self._prepare_extract(begin_idx,n) #print("pack_str={}, data={}".format(pack_str,data_list)) data_list = [x.encode() if isinstance(x,str) else x for x in data_list] #print("pack_str:{}".format(pack_str)) #print("data:{}".format(data_list)) pack_data = struct.pack(pack_str,*data_list) self._write_binary(begin_idx,pack_data) self.log("{}({},{})".format(sys._getframe().f_code.co_name,address,n)) #EOF
zeusrobot
/_zeusrobot/zeus_shared_memory/zeus_shared_memory_api.py
zeus_shared_memory_api.py
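ZeusShm.read works by concatenating the unpack_type characters of consecutive table entries into one little-endian struct format string and unpacking a single mmap slice. The self-contained sketch below reproduces that mechanism on a made-up three-entry table, using a packed byte string as a stand-in for the shared-memory file; the values are fabricated for illustration only.

import struct

# Sketch of the unpack mechanism behind ZeusShm.read/_prepare_extract.
# Fake table rows: (address, byte_size, byte_offset, unpack_type, name)
table = ((0x0008, 4, 0, "i", "update_counter"),
         (0x000C, 4, 4, "i", "now_updating"),
         (0x00E8, 8, 8, "Q", "ts_begin_update"))

buf = struct.pack("<iiQ", 7, 1, 123456789)        # stand-in for the mmap bytes
pack_str = "<" + "".join(e[3] for e in table)     # same idea as _prepare_extract
values = struct.unpack(pack_str, buf)
print(dict(zip((e[4] for e in table), values)))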
from typing import Tuple,Any class ZeusShmData: """ Windows PC 에서 공유메모리 데이터를 정의 """ # Shared memory entry # (address, byte_size, byte_offset, unpack_type, name), ZeusShmTable:Tuple[Any,...] = ( (0x0008, 4, 0, "i", "update_counter" ), # 0 (0x000C, 4, 4, "i", "now_updating" ), # 1 (0x00E8, 8, 8, "Q", "ts_begin_update" ), # 2 (0x00F0, 8, 16, "Q", "ts_end_update" ), # 3 (0x00F8, 8, 24, "Q", "ts_write_to_device" ), # 4 (0x0100, 4, 32, "i", "dio_io" ), # 5 (0x0104, 4, 36, "i", "dio_handio" ), # 6 (0x0108, 4, 40, "i", "dio_handadc" ), # 7 (0x010C, 4, 44, "i", "stsl" ), # 8 (0x010D, 4, 48, "i", "stsh" ), # 9 (0x010E, 4, 52, "i", "in_rl" ), # 10 (0x010F, 4, 56, "i", "in_rh" ), # 11 (0x0110, 4, 60, "i", "out_rl" ), # 12 (0x0111, 4, 64, "i", "_7seg1" ), # 13 (0x0112, 4, 68, "i", "_7seg2" ), # 14 (0x0113, 4, 72, "i", "_7seg3" ), # 15 (0x0114, 4, 76, "i", "in_x1" ), # 16 (0x0115, 4, 80, "i", "in_x2" ), # 17 (0x0116, 4, 84, "i", "rsv1" ), # 18 (0x0117, 4, 88, "i", "rsv2" ), # 19 (0x0118, 4, 92, "i", "out_x1" ), # 20 (0x0119, 4, 96, "i", "out_x2" ), # 21 (0x011A, 4, 100, "i", "rsv3" ), # 22 (0x011B, 4, 104, "i", "rsv4" ), # 23 (0x0120, 4, 108, "i", "motor_input[0]" ), # 24 (0x0124, 4, 112, "i", "motor_input[1]" ), # 25 (0x0128, 4, 116, "i", "motor_input[2]" ), # 26 (0x012C, 4, 120, "i", "motor_input[3]" ), # 27 (0x0130, 4, 124, "i", "motor_outinput[0]" ), # 28 (0x0134, 4, 128, "i", "motor_outinput[1]" ), # 29 (0x0138, 4, 132, "i", "motor_outinput[2]" ), # 30 (0x013C, 4, 136, "i", "motor_outinput[3]" ), # 31 (0x0140, 4, 140, "i", "ethercat_input[0]" ), # 32 (0x0144, 4, 144, "i", "ethercat_input[1]" ), # 33 (0x0148, 4, 148, "i", "ethercat_input[2]" ), # 34 (0x014C, 4, 152, "i", "ethercat_input[3]" ), # 35 (0x0150, 4, 156, "i", "ethercat_input[4]" ), # 36 (0x0154, 4, 160, "i", "ethercat_input[5]" ), # 37 (0x0158, 4, 164, "i", "ethercat_input[6]" ), # 38 (0x015C, 4, 168, "i", "ethercat_input[7]" ), # 39 (0x0160, 4, 172, "i", "ethercat_output[0]" ), # 40 (0x0164, 4, 176, "i", "ethercat_output[1]" ), # 41 (0x0168, 4, 180, "i", "ethercat_output[2]" ), # 42 (0x016C, 4, 184, "i", "ethercat_output[3]" ), # 43 (0x0170, 4, 188, "i", "ethercat_output[4]" ), # 44 (0x0174, 4, 192, "i", "ethercat_output[5]" ), # 45 (0x0178, 4, 196, "i", "ethercat_output[6]" ), # 46 (0x017C, 4, 200, "i", "ethercat_output[7]" ), # 47 (0x0180, 4, 204, "i", "cclink_bit_input[0]" ), # 48 (0x0184, 4, 208, "i", "cclink_bit_input[1]" ), # 49 (0x0188, 4, 212, "i", "cclink_bit_input[2]" ), # 50 (0x018C, 4, 216, "i", "cclink_bit_input[3]" ), # 51 (0x0190, 4, 220, "i", "cclink_bit_input[4]" ), # 52 (0x0194, 4, 224, "i", "cclink_bit_input[5]" ), # 53 (0x0198, 4, 228, "i", "cclink_bit_input[6]" ), # 54 (0x019C, 4, 232, "i", "cclink_bit_input[7]" ), # 55 (0x01A0, 4, 236, "i", "cclink_bit_output[0]" ), # 56 (0x01A4, 4, 240, "i", "cclink_bit_output[1]" ), # 57 (0x01A8, 4, 244, "i", "cclink_bit_output[2]" ), # 58 (0x01AC, 4, 248, "i", "cclink_bit_output[3]" ), # 59 (0x01B0, 4, 252, "i", "cclink_bit_output[4]" ), # 60 (0x01B4, 4, 256, "i", "cclink_bit_output[5]" ), # 61 (0x01B8, 4, 260, "i", "cclink_bit_output[6]" ), # 62 (0x01BC, 4, 264, "i", "cclink_bit_output[7]" ), # 63 (0x01C0, 4, 268, "i", "cclink_word_input[0]" ), # 64 (0x01C4, 4, 272, "i", "cclink_word_input[1]" ), # 65 (0x01C8, 4, 276, "i", "cclink_word_input[2]" ), # 66 (0x01CC, 4, 280, "i", "cclink_word_input[3]" ), # 67 (0x01D0, 4, 284, "i", "cclink_word_input[4]" ), # 68 (0x01D4, 4, 288, "i", "cclink_word_input[5]" ), # 69 (0x01D8, 4, 292, "i", "cclink_word_input[6]" ), # 70 (0x01DC, 4, 296, 
"i", "cclink_word_input[7]" ), # 71 (0x01E0, 4, 300, "i", "cclink_word_input[8]" ), # 72 (0x01E4, 4, 304, "i", "cclink_word_input[9]" ), # 73 (0x01E8, 4, 308, "i", "cclink_word_input[10]" ), # 74 (0x01EC, 4, 312, "i", "cclink_word_input[11]" ), # 75 (0x01F0, 4, 316, "i", "cclink_word_input[12]" ), # 76 (0x01F4, 4, 320, "i", "cclink_word_input[13]" ), # 77 (0x01F8, 4, 324, "i", "cclink_word_input[14]" ), # 78 (0x01FC, 4, 328, "i", "cclink_word_input[15]" ), # 79 (0x0200, 4, 332, "i", "cclink_word_output[0]" ), # 80 (0x0204, 4, 336, "i", "cclink_word_output[1]" ), # 81 (0x0208, 4, 340, "i", "cclink_word_output[2]" ), # 82 (0x020C, 4, 344, "i", "cclink_word_output[3]" ), # 83 (0x0210, 4, 348, "i", "cclink_word_output[4]" ), # 84 (0x0214, 4, 352, "i", "cclink_word_output[5]" ), # 85 (0x0218, 4, 356, "i", "cclink_word_output[6]" ), # 86 (0x021C, 4, 360, "i", "cclink_word_output[7]" ), # 87 (0x0220, 4, 364, "i", "cclink_word_output[8]" ), # 88 (0x0224, 4, 368, "i", "cclink_word_output[9]" ), # 89 (0x0228, 4, 372, "i", "cclink_word_output[10]"), # 90 (0x022C, 4, 376, "i", "cclink_word_output[11]"), # 91 (0x0230, 4, 380, "i", "cclink_word_output[12]"), # 92 (0x0234, 4, 384, "i", "cclink_word_output[13]"), # 93 (0x0238, 4, 388, "i", "cclink_word_output[14]"), # 94 (0x023C, 4, 392, "i", "cclink_word_output[15]"), # 95 (0x0300, 4, 396, "i", "mio_si0" ), # 96 (0x0304, 4, 400, "i", "mio_si1" ), # 97 (0x0308, 4, 404, "i", "mio_si2" ), # 98 (0x030C, 4, 408, "i", "mio_si3" ), # 99 (0x0310, 4, 412, "i", "mio_sl0" ), # 100 (0x0314, 4, 416, "i", "mio_sl1" ), # 101 (0x0318, 4, 420, "i", "mio_sl2" ), # 102 (0x031C, 4, 424, "i", "mio_sl3" ), # 103 (0x0320, 4, 428, "i", "mio_pi0[0]" ), # 104 (0x0324, 4, 432, "i", "mio_pi0[1]" ), # 105 (0x0328, 4, 436, "i", "mio_pi0[2]" ), # 106 (0x032C, 4, 440, "i", "mio_pi0[3]" ), # 107 (0x0330, 4, 444, "i", "mio_pi0[4]" ), # 108 (0x0334, 4, 448, "i", "mio_pi0[5]" ), # 109 (0x0338, 4, 452, "i", "mio_pi0[6]" ), # 110 (0x033C, 4, 456, "i", "mio_pi0[7]" ), # 111 (0x0340, 4, 460, "i", "mio_pi0[8]" ), # 112 (0x0344, 4, 464, "i", "mio_pi0[9]" ), # 113 (0x0348, 4, 468, "i", "mio_pi0[10]" ), # 114 (0x034C, 4, 472, "i", "mio_pi0[11]" ), # 115 (0x0350, 4, 476, "i", "mio_pi0[12]" ), # 116 (0x0354, 4, 480, "i", "mio_pi0[13]" ), # 117 (0x0358, 4, 484, "i", "mio_pi0[14]" ), # 118 (0x035C, 4, 488, "i", "mio_pi0[15]" ), # 119 (0x0360, 4, 492, "i", "mio_pi0[16]" ), # 120 (0x0364, 4, 496, "i", "mio_pi0[17]" ), # 121 (0x0368, 4, 500, "i", "mio_pi0[18]" ), # 122 (0x036C, 4, 504, "i", "mio_pi0[19]" ), # 123 (0x0370, 4, 508, "i", "mio_pi0[20]" ), # 124 (0x0374, 4, 512, "i", "mio_pi0[21]" ), # 125 (0x0378, 4, 516, "i", "mio_pi0[22]" ), # 126 (0x037C, 4, 520, "i", "mio_pi0[23]" ), # 127 (0x0380, 4, 524, "i", "mio_pi0[24]" ), # 128 (0x0384, 4, 528, "i", "mio_pi0[25]" ), # 129 (0x0388, 4, 532, "i", "mio_pi0[26]" ), # 130 (0x038C, 4, 536, "i", "mio_pi0[27]" ), # 131 (0x0390, 4, 540, "i", "mio_pi0[28]" ), # 132 (0x0394, 4, 544, "i", "mio_pi0[29]" ), # 133 (0x0398, 4, 548, "i", "mio_pi0[30]" ), # 134 (0x039C, 4, 552, "i", "mio_pi0[31]" ), # 135 (0x03A0, 4, 556, "i", "mio_pi0[32]" ), # 136 (0x03A4, 4, 560, "i", "mio_pi0[33]" ), # 137 (0x03A8, 4, 564, "i", "mio_pi0[34]" ), # 138 (0x03AC, 4, 568, "i", "mio_pi0[35]" ), # 139 (0x03B0, 4, 572, "i", "mio_pi0[36]" ), # 140 (0x03B4, 4, 576, "i", "mio_pi0[37]" ), # 141 (0x03B8, 4, 580, "i", "mio_pi0[38]" ), # 142 (0x03BC, 4, 584, "i", "mio_pi0[39]" ), # 143 (0x03C0, 4, 588, "i", "mio_pi0[40]" ), # 144 (0x03C4, 4, 592, "i", "mio_pi0[41]" ), # 145 (0x03C8, 4, 596, 
"i", "mio_pi0[42]" ), # 146 (0x03CC, 4, 600, "i", "mio_pi0[43]" ), # 147 (0x03D0, 4, 604, "i", "mio_pi0[44]" ), # 148 (0x03D4, 4, 608, "i", "mio_pi0[45]" ), # 149 (0x03D8, 4, 612, "i", "mio_pi0[46]" ), # 150 (0x03DC, 4, 616, "i", "mio_pi0[47]" ), # 151 (0x03E0, 4, 620, "i", "mio_pi0[48]" ), # 152 (0x03E4, 4, 624, "i", "mio_pi0[49]" ), # 153 (0x03E8, 4, 628, "i", "mio_pi0[50]" ), # 154 (0x03EC, 4, 632, "i", "mio_pi0[51]" ), # 155 (0x03F0, 4, 636, "i", "mio_pi0[52]" ), # 156 (0x03F4, 4, 640, "i", "mio_pi0[53]" ), # 157 (0x03F8, 4, 644, "i", "mio_pi0[54]" ), # 158 (0x03FC, 4, 648, "i", "mio_pi0[55]" ), # 159 (0x0400, 4, 652, "i", "mio_pi0[56]" ), # 160 (0x0404, 4, 656, "i", "mio_pi0[57]" ), # 161 (0x0408, 4, 660, "i", "mio_pi0[58]" ), # 162 (0x040C, 4, 664, "i", "mio_pi0[59]" ), # 163 (0x0410, 4, 668, "i", "mio_pi0[60]" ), # 164 (0x0414, 4, 672, "i", "mio_pi0[61]" ), # 165 (0x0418, 4, 676, "i", "mio_pi0[62]" ), # 166 (0x041C, 4, 680, "i", "mio_pi0[63]" ), # 167 (0x0420, 4, 684, "i", "mio_pi0[64]" ), # 168 (0x0424, 4, 688, "i", "mio_pi0[65]" ), # 169 (0x0428, 4, 692, "i", "mio_pi0[66]" ), # 170 (0x042C, 4, 696, "i", "mio_pi0[67]" ), # 171 (0x0430, 4, 700, "i", "mio_pi0[68]" ), # 172 (0x0434, 4, 704, "i", "mio_pi0[69]" ), # 173 (0x0438, 4, 708, "i", "mio_pi0[70]" ), # 174 (0x043C, 4, 712, "i", "mio_pi0[71]" ), # 175 (0x0440, 4, 716, "i", "mio_pi0[72]" ), # 176 (0x0444, 4, 720, "i", "mio_pi0[73]" ), # 177 (0x0448, 4, 724, "i", "mio_pi0[74]" ), # 178 (0x044C, 4, 728, "i", "mio_pi0[75]" ), # 179 (0x0450, 4, 732, "i", "mio_pi0[76]" ), # 180 (0x0454, 4, 736, "i", "mio_pi0[77]" ), # 181 (0x0458, 4, 740, "i", "mio_pi0[78]" ), # 182 (0x045C, 4, 744, "i", "mio_pi0[79]" ), # 183 (0x0460, 4, 748, "i", "mio_pi0[80]" ), # 184 (0x0464, 4, 752, "i", "mio_pi0[81]" ), # 185 (0x0468, 4, 756, "i", "mio_pi0[82]" ), # 186 (0x046C, 4, 760, "i", "mio_pi0[83]" ), # 187 (0x0470, 4, 764, "i", "mio_pi0[84]" ), # 188 (0x0474, 4, 768, "i", "mio_pi0[85]" ), # 189 (0x0478, 4, 772, "i", "mio_pi0[86]" ), # 190 (0x047C, 4, 776, "i", "mio_pi0[87]" ), # 191 (0x0480, 4, 780, "i", "mio_pi0[88]" ), # 192 (0x0484, 4, 784, "i", "mio_pi0[89]" ), # 193 (0x0488, 4, 788, "i", "mio_pi0[90]" ), # 194 (0x048C, 4, 792, "i", "mio_pi0[91]" ), # 195 (0x0490, 4, 796, "i", "mio_pi0[92]" ), # 196 (0x0494, 4, 800, "i", "mio_pi0[93]" ), # 197 (0x0498, 4, 804, "i", "mio_pi0[94]" ), # 198 (0x049C, 4, 808, "i", "mio_pi0[95]" ), # 199 (0x04A0, 4, 812, "i", "mio_pi0[96]" ), # 200 (0x04A4, 4, 816, "i", "mio_pi0[97]" ), # 201 (0x04A8, 4, 820, "i", "mio_pi0[98]" ), # 202 (0x04AC, 4, 824, "i", "mio_pi0[99]" ), # 203 (0x04B0, 4, 828, "i", "mio_pi0[100]" ), # 204 (0x04B4, 4, 832, "i", "mio_pi0[101]" ), # 205 (0x04B8, 4, 836, "i", "mio_pi0[102]" ), # 206 (0x04BC, 4, 840, "i", "mio_pi0[103]" ), # 207 (0x04C0, 4, 844, "i", "mio_pi0[104]" ), # 208 (0x04C4, 4, 848, "i", "mio_pi0[105]" ), # 209 (0x04C8, 4, 852, "i", "mio_pi0[106]" ), # 210 (0x04CC, 4, 856, "i", "mio_pi0[107]" ), # 211 (0x04D0, 4, 860, "i", "mio_pi0[108]" ), # 212 (0x04D4, 4, 864, "i", "mio_pi0[109]" ), # 213 (0x04D8, 4, 868, "i", "mio_pi0[110]" ), # 214 (0x04DC, 4, 872, "i", "mio_pi0[111]" ), # 215 (0x04E0, 4, 876, "i", "mio_pi0[112]" ), # 216 (0x04E4, 4, 880, "i", "mio_pi0[113]" ), # 217 (0x04E8, 4, 884, "i", "mio_pi0[114]" ), # 218 (0x04EC, 4, 888, "i", "mio_pi0[115]" ), # 219 (0x04F0, 4, 892, "i", "mio_pi0[116]" ), # 220 (0x04F4, 4, 896, "i", "mio_pi0[117]" ), # 221 (0x04F8, 4, 900, "i", "mio_pi0[118]" ), # 222 (0x04FC, 4, 904, "i", "mio_pi0[119]" ), # 223 (0x0500, 4, 908, "i", "write.cia402ctrl" 
), # 224 (0x0502, 4, 912, "i", "write.ctrl" ), # 225 (0x0504, 4, 916, "i", "write.cia402targetpls" ), # 226 (0x0508, 4, 920, "i", "write.notification" ), # 227 (0x050C, 4, 924, "i", "read.cia402sts" ), # 228 (0x050E, 4, 928, "i", "read.sts" ), # 229 (0x0510, 4, 932, "i", "read.rtn" ), # 230 (0x0512, 4, 936, "i", "read.cia402err" ), # 231 (0x0514, 4, 940, "i", "read.alarm" ), # 232 (0x0518, 4, 944, "i", "read.targetplsfb" ), # 233 (0x051C, 4, 948, "i", "read.cia402actualpls" ), # 234 (0x0520, 4, 952, "i", "read.cia402followingerr"), # 235 (0x0524, 4, 956, "i", "read.observer_output_value"), # 236 (0x0528, 4, 960, "i", "read.torque" ), # 237 (0x052A, 4, 964, "i", "read.thermal" ), # 238 (0x052C, 4, 968, "i", "read.disturbance" ), # 239 (0x052E, 4, 972, "i", "read.gainrate" ), # 240 (0x0530, 4, 976, "i", "read.polerate" ), # 241 (0x0532, 4, 980, "i", "read.filtered_torque" ), # 242 (0x0534, 4, 984, "i", "read.filtered_velocity"), # 243 (0x0536, 4, 988, "i", "read.filtered_D" ), # 244 (0x0538, 4, 992, "i", "read.filtered_Q" ), # 245 (0x0540, 4, 996, "i", "write.cia402ctrl" ), # 246 (0x0542, 4, 1000, "i", "write.ctrl" ), # 247 (0x0544, 4, 1004, "i", "write.cia402targetpls" ), # 248 (0x0548, 4, 1008, "i", "write.notification" ), # 249 (0x054C, 4, 1012, "i", "read.cia402sts" ), # 250 (0x054E, 4, 1016, "i", "read.sts" ), # 251 (0x0550, 4, 1020, "i", "read.rtn" ), # 252 (0x0552, 4, 1024, "i", "read.cia402err" ), # 253 (0x0554, 4, 1028, "i", "read.alarm" ), # 254 (0x0558, 4, 1032, "i", "read.targetplsfb" ), # 255 (0x055C, 4, 1036, "i", "read.cia402actualpls" ), # 256 (0x0560, 4, 1040, "i", "read.cia402followingerr"), # 257 (0x0564, 4, 1044, "i", "read.observer_output_value"), # 258 (0x0568, 4, 1048, "i", "read.torque" ), # 259 (0x056A, 4, 1052, "i", "read.thermal" ), # 260 (0x056C, 4, 1056, "i", "read.disturbance" ), # 261 (0x056E, 4, 1060, "i", "read.gainrate" ), # 262 (0x0570, 4, 1064, "i", "read.polerate" ), # 263 (0x0572, 4, 1068, "i", "read.filtered_torque" ), # 264 (0x0574, 4, 1072, "i", "read.filtered_velocity"), # 265 (0x0576, 4, 1076, "i", "read.filtered_D" ), # 266 (0x0578, 4, 1080, "i", "read.filtered_Q" ), # 267 (0x0580, 4, 1084, "i", "write.cia402ctrl" ), # 268 (0x0582, 4, 1088, "i", "write.ctrl" ), # 269 (0x0584, 4, 1092, "i", "write.cia402targetpls" ), # 270 (0x0588, 4, 1096, "i", "write.notification" ), # 271 (0x058C, 4, 1100, "i", "read.cia402sts" ), # 272 (0x058E, 4, 1104, "i", "read.sts" ), # 273 (0x0590, 4, 1108, "i", "read.rtn" ), # 274 (0x0592, 4, 1112, "i", "read.cia402err" ), # 275 (0x0594, 4, 1116, "i", "read.alarm" ), # 276 (0x0598, 4, 1120, "i", "read.targetplsfb" ), # 277 (0x059C, 4, 1124, "i", "read.cia402actualpls" ), # 278 (0x05A0, 4, 1128, "i", "read.cia402followingerr"), # 279 (0x05A4, 4, 1132, "i", "read.observer_output_value"), # 280 (0x05A8, 4, 1136, "i", "read.torque" ), # 281 (0x05AA, 4, 1140, "i", "read.thermal" ), # 282 (0x05AC, 4, 1144, "i", "read.disturbance" ), # 283 (0x05AE, 4, 1148, "i", "read.gainrate" ), # 284 (0x05B0, 4, 1152, "i", "read.polerate" ), # 285 (0x05B2, 4, 1156, "i", "read.filtered_torque" ), # 286 (0x05B4, 4, 1160, "i", "read.filtered_velocity"), # 287 (0x05B6, 4, 1164, "i", "read.filtered_D" ), # 288 (0x05B8, 4, 1168, "i", "read.filtered_Q" ), # 289 (0x05C0, 4, 1172, "i", "write.cia402ctrl" ), # 290 (0x05C2, 4, 1176, "i", "write.ctrl" ), # 291 (0x05C4, 4, 1180, "i", "write.cia402targetpls" ), # 292 (0x05C8, 4, 1184, "i", "write.notification" ), # 293 (0x05CC, 4, 1188, "i", "read.cia402sts" ), # 294 (0x05CE, 4, 1192, "i", "read.sts" ), # 
295 (0x05D0, 4, 1196, "i", "read.rtn" ), # 296 (0x05D2, 4, 1200, "i", "read.cia402err" ), # 297 (0x05D4, 4, 1204, "i", "read.alarm" ), # 298 (0x05D8, 4, 1208, "i", "read.targetplsfb" ), # 299 (0x05DC, 4, 1212, "i", "read.cia402actualpls" ), # 300 (0x05E0, 4, 1216, "i", "read.cia402followingerr"), # 301 (0x05E4, 4, 1220, "i", "read.observer_output_value"), # 302 (0x05E8, 4, 1224, "i", "read.torque" ), # 303 (0x05EA, 4, 1228, "i", "read.thermal" ), # 304 (0x05EC, 4, 1232, "i", "read.disturbance" ), # 305 (0x05EE, 4, 1236, "i", "read.gainrate" ), # 306 (0x05F0, 4, 1240, "i", "read.polerate" ), # 307 (0x05F2, 4, 1244, "i", "read.filtered_torque" ), # 308 (0x05F4, 4, 1248, "i", "read.filtered_velocity"), # 309 (0x05F6, 4, 1252, "i", "read.filtered_D" ), # 310 (0x05F8, 4, 1256, "i", "read.filtered_Q" ), # 311 (0x0600, 4, 1260, "i", "write.cia402ctrl" ), # 312 (0x0602, 4, 1264, "i", "write.ctrl" ), # 313 (0x0604, 4, 1268, "i", "write.cia402targetpls" ), # 314 (0x0608, 4, 1272, "i", "write.notification" ), # 315 (0x060C, 4, 1276, "i", "read.cia402sts" ), # 316 (0x060E, 4, 1280, "i", "read.sts" ), # 317 (0x0610, 4, 1284, "i", "read.rtn" ), # 318 (0x0612, 4, 1288, "i", "read.cia402err" ), # 319 (0x0614, 4, 1292, "i", "read.alarm" ), # 320 (0x0618, 4, 1296, "i", "read.targetplsfb" ), # 321 (0x061C, 4, 1300, "i", "read.cia402actualpls" ), # 322 (0x0620, 4, 1304, "i", "read.cia402followingerr"), # 323 (0x0624, 4, 1308, "i", "read.observer_output_value"), # 324 (0x0628, 4, 1312, "i", "read.torque" ), # 325 (0x062A, 4, 1316, "i", "read.thermal" ), # 326 (0x062C, 4, 1320, "i", "read.disturbance" ), # 327 (0x062E, 4, 1324, "i", "read.gainrate" ), # 328 (0x0630, 4, 1328, "i", "read.polerate" ), # 329 (0x0632, 4, 1332, "i", "read.filtered_torque" ), # 330 (0x0634, 4, 1336, "i", "read.filtered_velocity"), # 331 (0x0636, 4, 1340, "i", "read.filtered_D" ), # 332 (0x0638, 4, 1344, "i", "read.filtered_Q" ), # 333 (0x0640, 4, 1348, "i", "write.cia402ctrl" ), # 334 (0x0642, 4, 1352, "i", "write.ctrl" ), # 335 (0x0644, 4, 1356, "i", "write.cia402targetpls" ), # 336 (0x0648, 4, 1360, "i", "write.notification" ), # 337 (0x064C, 4, 1364, "i", "read.cia402sts" ), # 338 (0x064E, 4, 1368, "i", "read.sts" ), # 339 (0x0650, 4, 1372, "i", "read.rtn" ), # 340 (0x0652, 4, 1376, "i", "read.cia402err" ), # 341 (0x0654, 4, 1380, "i", "read.alarm" ), # 342 (0x0658, 4, 1384, "i", "read.targetplsfb" ), # 343 (0x065C, 4, 1388, "i", "read.cia402actualpls" ), # 344 (0x0660, 4, 1392, "i", "read.cia402followingerr"), # 345 (0x0664, 4, 1396, "i", "read.observer_output_value"), # 346 (0x0668, 4, 1400, "i", "read.torque" ), # 347 (0x066A, 4, 1404, "i", "read.thermal" ), # 348 (0x066C, 4, 1408, "i", "read.disturbance" ), # 349 (0x066E, 4, 1412, "i", "read.gainrate" ), # 350 (0x0670, 4, 1416, "i", "read.polerate" ), # 351 (0x0672, 4, 1420, "i", "read.filtered_torque" ), # 352 (0x0674, 4, 1424, "i", "read.filtered_velocity"), # 353 (0x0676, 4, 1428, "i", "read.filtered_D" ), # 354 (0x0678, 4, 1432, "i", "read.filtered_Q" ), # 355 (0x0680, 4, 1436, "i", "write.cia402ctrl" ), # 356 (0x0682, 4, 1440, "i", "write.ctrl" ), # 357 (0x0684, 4, 1444, "i", "write.cia402targetpls" ), # 358 (0x0688, 4, 1448, "i", "write.notification" ), # 359 (0x068C, 4, 1452, "i", "read.cia402sts" ), # 360 (0x068E, 4, 1456, "i", "read.sts" ), # 361 (0x0690, 4, 1460, "i", "read.rtn" ), # 362 (0x0692, 4, 1464, "i", "read.cia402err" ), # 363 (0x0694, 4, 1468, "i", "read.alarm" ), # 364 (0x0698, 4, 1472, "i", "read.targetplsfb" ), # 365 (0x069C, 4, 1476, "i", 
"read.cia402actualpls" ), # 366 (0x06A0, 4, 1480, "i", "read.cia402followingerr"), # 367 (0x06A4, 4, 1484, "i", "read.observer_output_value"), # 368 (0x06A8, 4, 1488, "i", "read.torque" ), # 369 (0x06AA, 4, 1492, "i", "read.thermal" ), # 370 (0x06AC, 4, 1496, "i", "read.disturbance" ), # 371 (0x06AE, 4, 1500, "i", "read.gainrate" ), # 372 (0x06B0, 4, 1504, "i", "read.polerate" ), # 373 (0x06B2, 4, 1508, "i", "read.filtered_torque" ), # 374 (0x06B4, 4, 1512, "i", "read.filtered_velocity"), # 375 (0x06B6, 4, 1516, "i", "read.filtered_D" ), # 376 (0x06B8, 4, 1520, "i", "read.filtered_Q" ), # 377 (0x06C0, 4, 1524, "i", "write.cia402ctrl" ), # 378 (0x06C2, 4, 1528, "i", "write.ctrl" ), # 379 (0x06C4, 4, 1532, "i", "write.cia402targetpls" ), # 380 (0x06C8, 4, 1536, "i", "write.notification" ), # 381 (0x06CC, 4, 1540, "i", "read.cia402sts" ), # 382 (0x06CE, 4, 1544, "i", "read.sts" ), # 383 (0x06D0, 4, 1548, "i", "read.rtn" ), # 384 (0x06D2, 4, 1552, "i", "read.cia402err" ), # 385 (0x06D4, 4, 1556, "i", "read.alarm" ), # 386 (0x06D8, 4, 1560, "i", "read.targetplsfb" ), # 387 (0x06DC, 4, 1564, "i", "read.cia402actualpls" ), # 388 (0x06E0, 4, 1568, "i", "read.cia402followingerr"), # 389 (0x06E4, 4, 1572, "i", "read.observer_output_value"), # 390 (0x06E8, 4, 1576, "i", "read.torque" ), # 391 (0x06EA, 4, 1580, "i", "read.thermal" ), # 392 (0x06EC, 4, 1584, "i", "read.disturbance" ), # 393 (0x06EE, 4, 1588, "i", "read.gainrate" ), # 394 (0x06F0, 4, 1592, "i", "read.polerate" ), # 395 (0x06F2, 4, 1596, "i", "read.filtered_torque" ), # 396 (0x06F4, 4, 1600, "i", "read.filtered_velocity"), # 397 (0x06F6, 4, 1604, "i", "read.filtered_D" ), # 398 (0x06F8, 4, 1608, "i", "read.filtered_Q" ), # 399 (0x0700, 4, 1612, "i", "sts" ), # 400 (0x0701, 4, 1616, "i", "gain_sts" ), # 401 (0x0710, 4, 1620, "i", "zero_point[0]" ), # 402 (0x0712, 4, 1624, "i", "zero_point[1]" ), # 403 (0x0714, 4, 1628, "i", "zero_point[2]" ), # 404 (0x0716, 4, 1632, "i", "zero_point[3]" ), # 405 (0x0718, 4, 1636, "i", "zero_point[4]" ), # 406 (0x071A, 4, 1640, "i", "zero_point[5]" ), # 407 (0x071C, 4, 1644, "i", "zero_point[6]" ), # 408 (0x071E, 4, 1648, "i", "zero_point[7]" ), # 409 (0x0720, 4, 1652, "i", "raw_value[0]" ), # 410 (0x0722, 4, 1656, "i", "raw_value[1]" ), # 411 (0x0724, 4, 1660, "i", "raw_value[2]" ), # 412 (0x0726, 4, 1664, "i", "raw_value[3]" ), # 413 (0x0728, 4, 1668, "i", "raw_value[4]" ), # 414 (0x072A, 4, 1672, "i", "raw_value[5]" ), # 415 (0x072C, 4, 1676, "i", "raw_value[6]" ), # 416 (0x072E, 4, 1680, "i", "raw_value[7]" ), # 417 (0x0730, 4, 1684, "f", "gain[0]" ), # 418 (0x0738, 4, 1688, "f", "gain[1]" ), # 419 (0x0740, 4, 1692, "f", "gain[2]" ), # 420 (0x0748, 4, 1696, "f", "gain[3]" ), # 421 (0x0750, 4, 1700, "f", "gain[4]" ), # 422 (0x0758, 4, 1704, "f", "gain[5]" ), # 423 (0x0760, 4, 1708, "f", "gain[6]" ), # 424 (0x0768, 4, 1712, "f", "gain[7]" ), # 425 (0x0800, 36, 1716, "36s", "robtask_name" ), # 426 (0x0820, 36, 1752, "36s", "running_name" ), # 427 (0x0840, 4, 1788, "i", "running_pid" ), # 428 (0x0844, 4, 1792, "i", "assign_port[0]" ), # 429 (0x0846, 4, 1796, "i", "assign_port[1]" ), # 430 (0x0848, 4, 1800, "i", "assign_port[2]" ), # 431 (0x084A, 4, 1804, "i", "assign_port[3]" ), # 432 (0x084C, 4, 1808, "i", "assign_port[4]" ), # 433 (0x084E, 4, 1812, "i", "assign_port[5]" ), # 434 (0x0850, 4, 1816, "i", "assign_port[6]" ), # 435 (0x0852, 4, 1820, "i", "assign_port[7]" ), # 436 (0x0854, 4, 1824, "i", "assign_port[8]" ), # 437 (0x0856, 4, 1828, "i", "assign_port[9]" ), # 438 (0x0858, 4, 1832, "i", 
"assign_port[10]" ), # 439 (0x085A, 4, 1836, "i", "assign_port[11]" ), # 440 (0x1800, 4, 1840, "i", "intval[0]" ), # 441 (0x1804, 4, 1844, "i", "intval[1]" ), # 442 (0x1808, 4, 1848, "i", "intval[2]" ), # 443 (0x180C, 4, 1852, "i", "intval[3]" ), # 444 (0x1810, 4, 1856, "i", "intval[4]" ), # 445 (0x1814, 4, 1860, "i", "intval[5]" ), # 446 (0x1818, 4, 1864, "i", "intval[6]" ), # 447 (0x181C, 4, 1868, "i", "intval[7]" ), # 448 (0x1820, 4, 1872, "i", "intval[8]" ), # 449 (0x1824, 4, 1876, "i", "intval[9]" ), # 450 (0x1828, 4, 1880, "i", "intval[10]" ), # 451 (0x182C, 4, 1884, "i", "intval[11]" ), # 452 (0x1830, 4, 1888, "i", "intval[12]" ), # 453 (0x1834, 4, 1892, "i", "intval[13]" ), # 454 (0x1838, 4, 1896, "i", "intval[14]" ), # 455 (0x183C, 4, 1900, "i", "intval[15]" ), # 456 (0x1840, 4, 1904, "i", "intval[16]" ), # 457 (0x1844, 4, 1908, "i", "intval[17]" ), # 458 (0x1848, 4, 1912, "i", "intval[18]" ), # 459 (0x184C, 4, 1916, "i", "intval[19]" ), # 460 (0x1850, 4, 1920, "i", "intval[20]" ), # 461 (0x1854, 4, 1924, "i", "intval[21]" ), # 462 (0x1858, 4, 1928, "i", "intval[22]" ), # 463 (0x185C, 4, 1932, "i", "intval[23]" ), # 464 (0x1860, 4, 1936, "i", "intval[24]" ), # 465 (0x1864, 4, 1940, "i", "intval[25]" ), # 466 (0x1868, 4, 1944, "i", "intval[26]" ), # 467 (0x186C, 4, 1948, "i", "intval[27]" ), # 468 (0x1870, 4, 1952, "i", "intval[28]" ), # 469 (0x1874, 4, 1956, "i", "intval[29]" ), # 470 (0x1878, 4, 1960, "i", "intval[30]" ), # 471 (0x187C, 4, 1964, "i", "intval[31]" ), # 472 (0x1880, 4, 1968, "i", "intval[32]" ), # 473 (0x1884, 4, 1972, "i", "intval[33]" ), # 474 (0x1888, 4, 1976, "i", "intval[34]" ), # 475 (0x188C, 4, 1980, "i", "intval[35]" ), # 476 (0x1890, 4, 1984, "i", "intval[36]" ), # 477 (0x1894, 4, 1988, "i", "intval[37]" ), # 478 (0x1898, 4, 1992, "i", "intval[38]" ), # 479 (0x189C, 4, 1996, "i", "intval[39]" ), # 480 (0x18A0, 4, 2000, "i", "intval[40]" ), # 481 (0x18A4, 4, 2004, "i", "intval[41]" ), # 482 (0x18A8, 4, 2008, "i", "intval[42]" ), # 483 (0x18AC, 4, 2012, "i", "intval[43]" ), # 484 (0x18B0, 4, 2016, "i", "intval[44]" ), # 485 (0x18B4, 4, 2020, "i", "intval[45]" ), # 486 (0x18B8, 4, 2024, "i", "intval[46]" ), # 487 (0x18BC, 4, 2028, "i", "intval[47]" ), # 488 (0x18C0, 4, 2032, "i", "intval[48]" ), # 489 (0x18C4, 4, 2036, "i", "intval[49]" ), # 490 (0x18C8, 4, 2040, "i", "intval[50]" ), # 491 (0x18CC, 4, 2044, "i", "intval[51]" ), # 492 (0x18D0, 4, 2048, "i", "intval[52]" ), # 493 (0x18D4, 4, 2052, "i", "intval[53]" ), # 494 (0x18D8, 4, 2056, "i", "intval[54]" ), # 495 (0x18DC, 4, 2060, "i", "intval[55]" ), # 496 (0x18E0, 4, 2064, "i", "intval[56]" ), # 497 (0x18E4, 4, 2068, "i", "intval[57]" ), # 498 (0x18E8, 4, 2072, "i", "intval[58]" ), # 499 (0x18EC, 4, 2076, "i", "intval[59]" ), # 500 (0x18F0, 4, 2080, "i", "intval[60]" ), # 501 (0x18F4, 4, 2084, "i", "intval[61]" ), # 502 (0x18F8, 4, 2088, "i", "intval[62]" ), # 503 (0x18FC, 4, 2092, "i", "intval[63]" ), # 504 (0x1900, 4, 2096, "i", "intval[64]" ), # 505 (0x1904, 4, 2100, "i", "intval[65]" ), # 506 (0x1908, 4, 2104, "i", "intval[66]" ), # 507 (0x190C, 4, 2108, "i", "intval[67]" ), # 508 (0x1910, 4, 2112, "i", "intval[68]" ), # 509 (0x1914, 4, 2116, "i", "intval[69]" ), # 510 (0x1918, 4, 2120, "i", "intval[70]" ), # 511 (0x191C, 4, 2124, "i", "intval[71]" ), # 512 (0x1920, 4, 2128, "i", "intval[72]" ), # 513 (0x1924, 4, 2132, "i", "intval[73]" ), # 514 (0x1928, 4, 2136, "i", "intval[74]" ), # 515 (0x192C, 4, 2140, "i", "intval[75]" ), # 516 (0x1930, 4, 2144, "i", "intval[76]" ), # 517 (0x1934, 4, 2148, "i", 
"intval[77]" ), # 518 (0x1938, 4, 2152, "i", "intval[78]" ), # 519 (0x193C, 4, 2156, "i", "intval[79]" ), # 520 (0x1940, 4, 2160, "i", "intval[80]" ), # 521 (0x1944, 4, 2164, "i", "intval[81]" ), # 522 (0x1948, 4, 2168, "i", "intval[82]" ), # 523 (0x194C, 4, 2172, "i", "intval[83]" ), # 524 (0x1950, 4, 2176, "i", "intval[84]" ), # 525 (0x1954, 4, 2180, "i", "intval[85]" ), # 526 (0x1958, 4, 2184, "i", "intval[86]" ), # 527 (0x195C, 4, 2188, "i", "intval[87]" ), # 528 (0x1960, 4, 2192, "i", "intval[88]" ), # 529 (0x1964, 4, 2196, "i", "intval[89]" ), # 530 (0x1968, 4, 2200, "i", "intval[90]" ), # 531 (0x196C, 4, 2204, "i", "intval[91]" ), # 532 (0x1970, 4, 2208, "i", "intval[92]" ), # 533 (0x1974, 4, 2212, "i", "intval[93]" ), # 534 (0x1978, 4, 2216, "i", "intval[94]" ), # 535 (0x197C, 4, 2220, "i", "intval[95]" ), # 536 (0x1980, 4, 2224, "i", "intval[96]" ), # 537 (0x1984, 4, 2228, "i", "intval[97]" ), # 538 (0x1988, 4, 2232, "i", "intval[98]" ), # 539 (0x198C, 4, 2236, "i", "intval[99]" ), # 540 (0x1990, 4, 2240, "i", "intval[100]" ), # 541 (0x1994, 4, 2244, "i", "intval[101]" ), # 542 (0x1998, 4, 2248, "i", "intval[102]" ), # 543 (0x199C, 4, 2252, "i", "intval[103]" ), # 544 (0x19A0, 4, 2256, "i", "intval[104]" ), # 545 (0x19A4, 4, 2260, "i", "intval[105]" ), # 546 (0x19A8, 4, 2264, "i", "intval[106]" ), # 547 (0x19AC, 4, 2268, "i", "intval[107]" ), # 548 (0x19B0, 4, 2272, "i", "intval[108]" ), # 549 (0x19B4, 4, 2276, "i", "intval[109]" ), # 550 (0x19B8, 4, 2280, "i", "intval[110]" ), # 551 (0x19BC, 4, 2284, "i", "intval[111]" ), # 552 (0x19C0, 4, 2288, "i", "intval[112]" ), # 553 (0x19C4, 4, 2292, "i", "intval[113]" ), # 554 (0x19C8, 4, 2296, "i", "intval[114]" ), # 555 (0x19CC, 4, 2300, "i", "intval[115]" ), # 556 (0x19D0, 4, 2304, "i", "intval[116]" ), # 557 (0x19D4, 4, 2308, "i", "intval[117]" ), # 558 (0x19D8, 4, 2312, "i", "intval[118]" ), # 559 (0x19DC, 4, 2316, "i", "intval[119]" ), # 560 (0x19E0, 4, 2320, "i", "intval[120]" ), # 561 (0x19E4, 4, 2324, "i", "intval[121]" ), # 562 (0x19E8, 4, 2328, "i", "intval[122]" ), # 563 (0x19EC, 4, 2332, "i", "intval[123]" ), # 564 (0x19F0, 4, 2336, "i", "intval[124]" ), # 565 (0x19F4, 4, 2340, "i", "intval[125]" ), # 566 (0x19F8, 4, 2344, "i", "intval[126]" ), # 567 (0x19FC, 4, 2348, "i", "intval[127]" ), # 568 (0x1A00, 4, 2352, "i", "intval[128]" ), # 569 (0x1A04, 4, 2356, "i", "intval[129]" ), # 570 (0x1A08, 4, 2360, "i", "intval[130]" ), # 571 (0x1A0C, 4, 2364, "i", "intval[131]" ), # 572 (0x1A10, 4, 2368, "i", "intval[132]" ), # 573 (0x1A14, 4, 2372, "i", "intval[133]" ), # 574 (0x1A18, 4, 2376, "i", "intval[134]" ), # 575 (0x1A1C, 4, 2380, "i", "intval[135]" ), # 576 (0x1A20, 4, 2384, "i", "intval[136]" ), # 577 (0x1A24, 4, 2388, "i", "intval[137]" ), # 578 (0x1A28, 4, 2392, "i", "intval[138]" ), # 579 (0x1A2C, 4, 2396, "i", "intval[139]" ), # 580 (0x1A30, 4, 2400, "i", "intval[140]" ), # 581 (0x1A34, 4, 2404, "i", "intval[141]" ), # 582 (0x1A38, 4, 2408, "i", "intval[142]" ), # 583 (0x1A3C, 4, 2412, "i", "intval[143]" ), # 584 (0x1A40, 4, 2416, "i", "intval[144]" ), # 585 (0x1A44, 4, 2420, "i", "intval[145]" ), # 586 (0x1A48, 4, 2424, "i", "intval[146]" ), # 587 (0x1A4C, 4, 2428, "i", "intval[147]" ), # 588 (0x1A50, 4, 2432, "i", "intval[148]" ), # 589 (0x1A54, 4, 2436, "i", "intval[149]" ), # 590 (0x1A58, 4, 2440, "i", "intval[150]" ), # 591 (0x1A5C, 4, 2444, "i", "intval[151]" ), # 592 (0x1A60, 4, 2448, "i", "intval[152]" ), # 593 (0x1A64, 4, 2452, "i", "intval[153]" ), # 594 (0x1A68, 4, 2456, "i", "intval[154]" ), # 595 (0x1A6C, 4, 
2460, "i", "intval[155]" ), # 596 (0x1A70, 4, 2464, "i", "intval[156]" ), # 597 (0x1A74, 4, 2468, "i", "intval[157]" ), # 598 (0x1A78, 4, 2472, "i", "intval[158]" ), # 599 (0x1A7C, 4, 2476, "i", "intval[159]" ), # 600 (0x1A80, 4, 2480, "i", "intval[160]" ), # 601 (0x1A84, 4, 2484, "i", "intval[161]" ), # 602 (0x1A88, 4, 2488, "i", "intval[162]" ), # 603 (0x1A8C, 4, 2492, "i", "intval[163]" ), # 604 (0x1A90, 4, 2496, "i", "intval[164]" ), # 605 (0x1A94, 4, 2500, "i", "intval[165]" ), # 606 (0x1A98, 4, 2504, "i", "intval[166]" ), # 607 (0x1A9C, 4, 2508, "i", "intval[167]" ), # 608 (0x1AA0, 4, 2512, "i", "intval[168]" ), # 609 (0x1AA4, 4, 2516, "i", "intval[169]" ), # 610 (0x1AA8, 4, 2520, "i", "intval[170]" ), # 611 (0x1AAC, 4, 2524, "i", "intval[171]" ), # 612 (0x1AB0, 4, 2528, "i", "intval[172]" ), # 613 (0x1AB4, 4, 2532, "i", "intval[173]" ), # 614 (0x1AB8, 4, 2536, "i", "intval[174]" ), # 615 (0x1ABC, 4, 2540, "i", "intval[175]" ), # 616 (0x1AC0, 4, 2544, "i", "intval[176]" ), # 617 (0x1AC4, 4, 2548, "i", "intval[177]" ), # 618 (0x1AC8, 4, 2552, "i", "intval[178]" ), # 619 (0x1ACC, 4, 2556, "i", "intval[179]" ), # 620 (0x1AD0, 4, 2560, "i", "intval[180]" ), # 621 (0x1AD4, 4, 2564, "i", "intval[181]" ), # 622 (0x1AD8, 4, 2568, "i", "intval[182]" ), # 623 (0x1ADC, 4, 2572, "i", "intval[183]" ), # 624 (0x1AE0, 4, 2576, "i", "intval[184]" ), # 625 (0x1AE4, 4, 2580, "i", "intval[185]" ), # 626 (0x1AE8, 4, 2584, "i", "intval[186]" ), # 627 (0x1AEC, 4, 2588, "i", "intval[187]" ), # 628 (0x1AF0, 4, 2592, "i", "intval[188]" ), # 629 (0x1AF4, 4, 2596, "i", "intval[189]" ), # 630 (0x1AF8, 4, 2600, "i", "intval[190]" ), # 631 (0x1AFC, 4, 2604, "i", "intval[191]" ), # 632 (0x1B00, 4, 2608, "i", "intval[192]" ), # 633 (0x1B04, 4, 2612, "i", "intval[193]" ), # 634 (0x1B08, 4, 2616, "i", "intval[194]" ), # 635 (0x1B0C, 4, 2620, "i", "intval[195]" ), # 636 (0x1B10, 4, 2624, "i", "intval[196]" ), # 637 (0x1B14, 4, 2628, "i", "intval[197]" ), # 638 (0x1B18, 4, 2632, "i", "intval[198]" ), # 639 (0x1B1C, 4, 2636, "i", "intval[199]" ), # 640 (0x1B20, 4, 2640, "i", "intval[200]" ), # 641 (0x1B24, 4, 2644, "i", "intval[201]" ), # 642 (0x1B28, 4, 2648, "i", "intval[202]" ), # 643 (0x1B2C, 4, 2652, "i", "intval[203]" ), # 644 (0x1B30, 4, 2656, "i", "intval[204]" ), # 645 (0x1B34, 4, 2660, "i", "intval[205]" ), # 646 (0x1B38, 4, 2664, "i", "intval[206]" ), # 647 (0x1B3C, 4, 2668, "i", "intval[207]" ), # 648 (0x1B40, 4, 2672, "i", "intval[208]" ), # 649 (0x1B44, 4, 2676, "i", "intval[209]" ), # 650 (0x1B48, 4, 2680, "i", "intval[210]" ), # 651 (0x1B4C, 4, 2684, "i", "intval[211]" ), # 652 (0x1B50, 4, 2688, "i", "intval[212]" ), # 653 (0x1B54, 4, 2692, "i", "intval[213]" ), # 654 (0x1B58, 4, 2696, "i", "intval[214]" ), # 655 (0x1B5C, 4, 2700, "i", "intval[215]" ), # 656 (0x1B60, 4, 2704, "i", "intval[216]" ), # 657 (0x1B64, 4, 2708, "i", "intval[217]" ), # 658 (0x1B68, 4, 2712, "i", "intval[218]" ), # 659 (0x1B6C, 4, 2716, "i", "intval[219]" ), # 660 (0x1B70, 4, 2720, "i", "intval[220]" ), # 661 (0x1B74, 4, 2724, "i", "intval[221]" ), # 662 (0x1B78, 4, 2728, "i", "intval[222]" ), # 663 (0x1B7C, 4, 2732, "i", "intval[223]" ), # 664 (0x1B80, 4, 2736, "i", "intval[224]" ), # 665 (0x1B84, 4, 2740, "i", "intval[225]" ), # 666 (0x1B88, 4, 2744, "i", "intval[226]" ), # 667 (0x1B8C, 4, 2748, "i", "intval[227]" ), # 668 (0x1B90, 4, 2752, "i", "intval[228]" ), # 669 (0x1B94, 4, 2756, "i", "intval[229]" ), # 670 (0x1B98, 4, 2760, "i", "intval[230]" ), # 671 (0x1B9C, 4, 2764, "i", "intval[231]" ), # 672 (0x1BA0, 4, 2768, "i", 
"intval[232]" ), # 673 (0x1BA4, 4, 2772, "i", "intval[233]" ), # 674 (0x1BA8, 4, 2776, "i", "intval[234]" ), # 675 (0x1BAC, 4, 2780, "i", "intval[235]" ), # 676 (0x1BB0, 4, 2784, "i", "intval[236]" ), # 677 (0x1BB4, 4, 2788, "i", "intval[237]" ), # 678 (0x1BB8, 4, 2792, "i", "intval[238]" ), # 679 (0x1BBC, 4, 2796, "i", "intval[239]" ), # 680 (0x1BC0, 4, 2800, "i", "intval[240]" ), # 681 (0x1BC4, 4, 2804, "i", "intval[241]" ), # 682 (0x1BC8, 4, 2808, "i", "intval[242]" ), # 683 (0x1BCC, 4, 2812, "i", "intval[243]" ), # 684 (0x1BD0, 4, 2816, "i", "intval[244]" ), # 685 (0x1BD4, 4, 2820, "i", "intval[245]" ), # 686 (0x1BD8, 4, 2824, "i", "intval[246]" ), # 687 (0x1BDC, 4, 2828, "i", "intval[247]" ), # 688 (0x1BE0, 4, 2832, "i", "intval[248]" ), # 689 (0x1BE4, 4, 2836, "i", "intval[249]" ), # 690 (0x1BE8, 4, 2840, "i", "intval[250]" ), # 691 (0x1BEC, 4, 2844, "i", "intval[251]" ), # 692 (0x1BF0, 4, 2848, "i", "intval[252]" ), # 693 (0x1BF4, 4, 2852, "i", "intval[253]" ), # 694 (0x1BF8, 4, 2856, "i", "intval[254]" ), # 695 (0x1BFC, 4, 2860, "i", "intval[255]" ), # 696 (0x1C00, 4, 2864, "f", "floatval[0]" ), # 697 (0x1C08, 4, 2868, "f", "floatval[1]" ), # 698 (0x1C10, 4, 2872, "f", "floatval[2]" ), # 699 (0x1C18, 4, 2876, "f", "floatval[3]" ), # 700 (0x1C20, 4, 2880, "f", "floatval[4]" ), # 701 (0x1C28, 4, 2884, "f", "floatval[5]" ), # 702 (0x1C30, 4, 2888, "f", "floatval[6]" ), # 703 (0x1C38, 4, 2892, "f", "floatval[7]" ), # 704 (0x1C40, 4, 2896, "f", "floatval[8]" ), # 705 (0x1C48, 4, 2900, "f", "floatval[9]" ), # 706 (0x1C50, 4, 2904, "f", "floatval[10]" ), # 707 (0x1C58, 4, 2908, "f", "floatval[11]" ), # 708 (0x1C60, 4, 2912, "f", "floatval[12]" ), # 709 (0x1C68, 4, 2916, "f", "floatval[13]" ), # 710 (0x1C70, 4, 2920, "f", "floatval[14]" ), # 711 (0x1C78, 4, 2924, "f", "floatval[15]" ), # 712 (0x1C80, 4, 2928, "f", "floatval[16]" ), # 713 (0x1C88, 4, 2932, "f", "floatval[17]" ), # 714 (0x1C90, 4, 2936, "f", "floatval[18]" ), # 715 (0x1C98, 4, 2940, "f", "floatval[19]" ), # 716 (0x1CA0, 4, 2944, "f", "floatval[20]" ), # 717 (0x1CA8, 4, 2948, "f", "floatval[21]" ), # 718 (0x1CB0, 4, 2952, "f", "floatval[22]" ), # 719 (0x1CB8, 4, 2956, "f", "floatval[23]" ), # 720 (0x1CC0, 4, 2960, "f", "floatval[24]" ), # 721 (0x1CC8, 4, 2964, "f", "floatval[25]" ), # 722 (0x1CD0, 4, 2968, "f", "floatval[26]" ), # 723 (0x1CD8, 4, 2972, "f", "floatval[27]" ), # 724 (0x1CE0, 4, 2976, "f", "floatval[28]" ), # 725 (0x1CE8, 4, 2980, "f", "floatval[29]" ), # 726 (0x1CF0, 4, 2984, "f", "floatval[30]" ), # 727 (0x1CF8, 4, 2988, "f", "floatval[31]" ), # 728 (0x1D00, 4, 2992, "f", "floatval[32]" ), # 729 (0x1D08, 4, 2996, "f", "floatval[33]" ), # 730 (0x1D10, 4, 3000, "f", "floatval[34]" ), # 731 (0x1D18, 4, 3004, "f", "floatval[35]" ), # 732 (0x1D20, 4, 3008, "f", "floatval[36]" ), # 733 (0x1D28, 4, 3012, "f", "floatval[37]" ), # 734 (0x1D30, 4, 3016, "f", "floatval[38]" ), # 735 (0x1D38, 4, 3020, "f", "floatval[39]" ), # 736 (0x1D40, 4, 3024, "f", "floatval[40]" ), # 737 (0x1D48, 4, 3028, "f", "floatval[41]" ), # 738 (0x1D50, 4, 3032, "f", "floatval[42]" ), # 739 (0x1D58, 4, 3036, "f", "floatval[43]" ), # 740 (0x1D60, 4, 3040, "f", "floatval[44]" ), # 741 (0x1D68, 4, 3044, "f", "floatval[45]" ), # 742 (0x1D70, 4, 3048, "f", "floatval[46]" ), # 743 (0x1D78, 4, 3052, "f", "floatval[47]" ), # 744 (0x1D80, 4, 3056, "f", "floatval[48]" ), # 745 (0x1D88, 4, 3060, "f", "floatval[49]" ), # 746 (0x1D90, 4, 3064, "f", "floatval[50]" ), # 747 (0x1D98, 4, 3068, "f", "floatval[51]" ), # 748 (0x1DA0, 4, 3072, "f", "floatval[52]" 
), # 749 (0x1DA8, 4, 3076, "f", "floatval[53]" ), # 750 (0x1DB0, 4, 3080, "f", "floatval[54]" ), # 751 (0x1DB8, 4, 3084, "f", "floatval[55]" ), # 752 (0x1DC0, 4, 3088, "f", "floatval[56]" ), # 753 (0x1DC8, 4, 3092, "f", "floatval[57]" ), # 754 (0x1DD0, 4, 3096, "f", "floatval[58]" ), # 755 (0x1DD8, 4, 3100, "f", "floatval[59]" ), # 756 (0x1DE0, 4, 3104, "f", "floatval[60]" ), # 757 (0x1DE8, 4, 3108, "f", "floatval[61]" ), # 758 (0x1DF0, 4, 3112, "f", "floatval[62]" ), # 759 (0x1DF8, 4, 3116, "f", "floatval[63]" ), # 760 (0x1E00, 4, 3120, "f", "floatval[64]" ), # 761 (0x1E08, 4, 3124, "f", "floatval[65]" ), # 762 (0x1E10, 4, 3128, "f", "floatval[66]" ), # 763 (0x1E18, 4, 3132, "f", "floatval[67]" ), # 764 (0x1E20, 4, 3136, "f", "floatval[68]" ), # 765 (0x1E28, 4, 3140, "f", "floatval[69]" ), # 766 (0x1E30, 4, 3144, "f", "floatval[70]" ), # 767 (0x1E38, 4, 3148, "f", "floatval[71]" ), # 768 (0x1E40, 4, 3152, "f", "floatval[72]" ), # 769 (0x1E48, 4, 3156, "f", "floatval[73]" ), # 770 (0x1E50, 4, 3160, "f", "floatval[74]" ), # 771 (0x1E58, 4, 3164, "f", "floatval[75]" ), # 772 (0x1E60, 4, 3168, "f", "floatval[76]" ), # 773 (0x1E68, 4, 3172, "f", "floatval[77]" ), # 774 (0x1E70, 4, 3176, "f", "floatval[78]" ), # 775 (0x1E78, 4, 3180, "f", "floatval[79]" ), # 776 (0x1E80, 4, 3184, "f", "floatval[80]" ), # 777 (0x1E88, 4, 3188, "f", "floatval[81]" ), # 778 (0x1E90, 4, 3192, "f", "floatval[82]" ), # 779 (0x1E98, 4, 3196, "f", "floatval[83]" ), # 780 (0x1EA0, 4, 3200, "f", "floatval[84]" ), # 781 (0x1EA8, 4, 3204, "f", "floatval[85]" ), # 782 (0x1EB0, 4, 3208, "f", "floatval[86]" ), # 783 (0x1EB8, 4, 3212, "f", "floatval[87]" ), # 784 (0x1EC0, 4, 3216, "f", "floatval[88]" ), # 785 (0x1EC8, 4, 3220, "f", "floatval[89]" ), # 786 (0x1ED0, 4, 3224, "f", "floatval[90]" ), # 787 (0x1ED8, 4, 3228, "f", "floatval[91]" ), # 788 (0x1EE0, 4, 3232, "f", "floatval[92]" ), # 789 (0x1EE8, 4, 3236, "f", "floatval[93]" ), # 790 (0x1EF0, 4, 3240, "f", "floatval[94]" ), # 791 (0x1EF8, 4, 3244, "f", "floatval[95]" ), # 792 (0x1F00, 4, 3248, "f", "floatval[96]" ), # 793 (0x1F08, 4, 3252, "f", "floatval[97]" ), # 794 (0x1F10, 4, 3256, "f", "floatval[98]" ), # 795 (0x1F18, 4, 3260, "f", "floatval[99]" ), # 796 (0x1F20, 4, 3264, "f", "floatval[100]" ), # 797 (0x1F28, 4, 3268, "f", "floatval[101]" ), # 798 (0x1F30, 4, 3272, "f", "floatval[102]" ), # 799 (0x1F38, 4, 3276, "f", "floatval[103]" ), # 800 (0x1F40, 4, 3280, "f", "floatval[104]" ), # 801 (0x1F48, 4, 3284, "f", "floatval[105]" ), # 802 (0x1F50, 4, 3288, "f", "floatval[106]" ), # 803 (0x1F58, 4, 3292, "f", "floatval[107]" ), # 804 (0x1F60, 4, 3296, "f", "floatval[108]" ), # 805 (0x1F68, 4, 3300, "f", "floatval[109]" ), # 806 (0x1F70, 4, 3304, "f", "floatval[110]" ), # 807 (0x1F78, 4, 3308, "f", "floatval[111]" ), # 808 (0x1F80, 4, 3312, "f", "floatval[112]" ), # 809 (0x1F88, 4, 3316, "f", "floatval[113]" ), # 810 (0x1F90, 4, 3320, "f", "floatval[114]" ), # 811 (0x1F98, 4, 3324, "f", "floatval[115]" ), # 812 (0x1FA0, 4, 3328, "f", "floatval[116]" ), # 813 (0x1FA8, 4, 3332, "f", "floatval[117]" ), # 814 (0x1FB0, 4, 3336, "f", "floatval[118]" ), # 815 (0x1FB8, 4, 3340, "f", "floatval[119]" ), # 816 (0x1FC0, 4, 3344, "f", "floatval[120]" ), # 817 (0x1FC8, 4, 3348, "f", "floatval[121]" ), # 818 (0x1FD0, 4, 3352, "f", "floatval[122]" ), # 819 (0x1FD8, 4, 3356, "f", "floatval[123]" ), # 820 (0x1FE0, 4, 3360, "f", "floatval[124]" ), # 821 (0x1FE8, 4, 3364, "f", "floatval[125]" ), # 822 (0x1FF0, 4, 3368, "f", "floatval[126]" ), # 823 (0x1FF8, 4, 3372, "f", "floatval[127]" 
), # 824 (0x2000, 4, 3376, "f", "floatval[128]" ), # 825 (0x2008, 4, 3380, "f", "floatval[129]" ), # 826 (0x2010, 4, 3384, "f", "floatval[130]" ), # 827 (0x2018, 4, 3388, "f", "floatval[131]" ), # 828 (0x2020, 4, 3392, "f", "floatval[132]" ), # 829 (0x2028, 4, 3396, "f", "floatval[133]" ), # 830 (0x2030, 4, 3400, "f", "floatval[134]" ), # 831 (0x2038, 4, 3404, "f", "floatval[135]" ), # 832 (0x2040, 4, 3408, "f", "floatval[136]" ), # 833 (0x2048, 4, 3412, "f", "floatval[137]" ), # 834 (0x2050, 4, 3416, "f", "floatval[138]" ), # 835 (0x2058, 4, 3420, "f", "floatval[139]" ), # 836 (0x2060, 4, 3424, "f", "floatval[140]" ), # 837 (0x2068, 4, 3428, "f", "floatval[141]" ), # 838 (0x2070, 4, 3432, "f", "floatval[142]" ), # 839 (0x2078, 4, 3436, "f", "floatval[143]" ), # 840 (0x2080, 4, 3440, "f", "floatval[144]" ), # 841 (0x2088, 4, 3444, "f", "floatval[145]" ), # 842 (0x2090, 4, 3448, "f", "floatval[146]" ), # 843 (0x2098, 4, 3452, "f", "floatval[147]" ), # 844 (0x20A0, 4, 3456, "f", "floatval[148]" ), # 845 (0x20A8, 4, 3460, "f", "floatval[149]" ), # 846 (0x20B0, 4, 3464, "f", "floatval[150]" ), # 847 (0x20B8, 4, 3468, "f", "floatval[151]" ), # 848 (0x20C0, 4, 3472, "f", "floatval[152]" ), # 849 (0x20C8, 4, 3476, "f", "floatval[153]" ), # 850 (0x20D0, 4, 3480, "f", "floatval[154]" ), # 851 (0x20D8, 4, 3484, "f", "floatval[155]" ), # 852 (0x20E0, 4, 3488, "f", "floatval[156]" ), # 853 (0x20E8, 4, 3492, "f", "floatval[157]" ), # 854 (0x20F0, 4, 3496, "f", "floatval[158]" ), # 855 (0x20F8, 4, 3500, "f", "floatval[159]" ), # 856 (0x2100, 4, 3504, "f", "floatval[160]" ), # 857 (0x2108, 4, 3508, "f", "floatval[161]" ), # 858 (0x2110, 4, 3512, "f", "floatval[162]" ), # 859 (0x2118, 4, 3516, "f", "floatval[163]" ), # 860 (0x2120, 4, 3520, "f", "floatval[164]" ), # 861 (0x2128, 4, 3524, "f", "floatval[165]" ), # 862 (0x2130, 4, 3528, "f", "floatval[166]" ), # 863 (0x2138, 4, 3532, "f", "floatval[167]" ), # 864 (0x2140, 4, 3536, "f", "floatval[168]" ), # 865 (0x2148, 4, 3540, "f", "floatval[169]" ), # 866 (0x2150, 4, 3544, "f", "floatval[170]" ), # 867 (0x2158, 4, 3548, "f", "floatval[171]" ), # 868 (0x2160, 4, 3552, "f", "floatval[172]" ), # 869 (0x2168, 4, 3556, "f", "floatval[173]" ), # 870 (0x2170, 4, 3560, "f", "floatval[174]" ), # 871 (0x2178, 4, 3564, "f", "floatval[175]" ), # 872 (0x2180, 4, 3568, "f", "floatval[176]" ), # 873 (0x2188, 4, 3572, "f", "floatval[177]" ), # 874 (0x2190, 4, 3576, "f", "floatval[178]" ), # 875 (0x2198, 4, 3580, "f", "floatval[179]" ), # 876 (0x21A0, 4, 3584, "f", "floatval[180]" ), # 877 (0x21A8, 4, 3588, "f", "floatval[181]" ), # 878 (0x21B0, 4, 3592, "f", "floatval[182]" ), # 879 (0x21B8, 4, 3596, "f", "floatval[183]" ), # 880 (0x21C0, 4, 3600, "f", "floatval[184]" ), # 881 (0x21C8, 4, 3604, "f", "floatval[185]" ), # 882 (0x21D0, 4, 3608, "f", "floatval[186]" ), # 883 (0x21D8, 4, 3612, "f", "floatval[187]" ), # 884 (0x21E0, 4, 3616, "f", "floatval[188]" ), # 885 (0x21E8, 4, 3620, "f", "floatval[189]" ), # 886 (0x21F0, 4, 3624, "f", "floatval[190]" ), # 887 (0x21F8, 4, 3628, "f", "floatval[191]" ), # 888 (0x2200, 4, 3632, "f", "floatval[192]" ), # 889 (0x2208, 4, 3636, "f", "floatval[193]" ), # 890 (0x2210, 4, 3640, "f", "floatval[194]" ), # 891 (0x2218, 4, 3644, "f", "floatval[195]" ), # 892 (0x2220, 4, 3648, "f", "floatval[196]" ), # 893 (0x2228, 4, 3652, "f", "floatval[197]" ), # 894 (0x2230, 4, 3656, "f", "floatval[198]" ), # 895 (0x2238, 4, 3660, "f", "floatval[199]" ), # 896 (0x2240, 4, 3664, "f", "floatval[200]" ), # 897 (0x2248, 4, 3668, "f", "floatval[201]" ), 
# 898 (0x2250, 4, 3672, "f", "floatval[202]" ), # 899 (0x2258, 4, 3676, "f", "floatval[203]" ), # 900 (0x2260, 4, 3680, "f", "floatval[204]" ), # 901 (0x2268, 4, 3684, "f", "floatval[205]" ), # 902 (0x2270, 4, 3688, "f", "floatval[206]" ), # 903 (0x2278, 4, 3692, "f", "floatval[207]" ), # 904 (0x2280, 4, 3696, "f", "floatval[208]" ), # 905 (0x2288, 4, 3700, "f", "floatval[209]" ), # 906 (0x2290, 4, 3704, "f", "floatval[210]" ), # 907 (0x2298, 4, 3708, "f", "floatval[211]" ), # 908 (0x22A0, 4, 3712, "f", "floatval[212]" ), # 909 (0x22A8, 4, 3716, "f", "floatval[213]" ), # 910 (0x22B0, 4, 3720, "f", "floatval[214]" ), # 911 (0x22B8, 4, 3724, "f", "floatval[215]" ), # 912 (0x22C0, 4, 3728, "f", "floatval[216]" ), # 913 (0x22C8, 4, 3732, "f", "floatval[217]" ), # 914 (0x22D0, 4, 3736, "f", "floatval[218]" ), # 915 (0x22D8, 4, 3740, "f", "floatval[219]" ), # 916 (0x22E0, 4, 3744, "f", "floatval[220]" ), # 917 (0x22E8, 4, 3748, "f", "floatval[221]" ), # 918 (0x22F0, 4, 3752, "f", "floatval[222]" ), # 919 (0x22F8, 4, 3756, "f", "floatval[223]" ), # 920 (0x2300, 4, 3760, "f", "floatval[224]" ), # 921 (0x2308, 4, 3764, "f", "floatval[225]" ), # 922 (0x2310, 4, 3768, "f", "floatval[226]" ), # 923 (0x2318, 4, 3772, "f", "floatval[227]" ), # 924 (0x2320, 4, 3776, "f", "floatval[228]" ), # 925 (0x2328, 4, 3780, "f", "floatval[229]" ), # 926 (0x2330, 4, 3784, "f", "floatval[230]" ), # 927 (0x2338, 4, 3788, "f", "floatval[231]" ), # 928 (0x2340, 4, 3792, "f", "floatval[232]" ), # 929 (0x2348, 4, 3796, "f", "floatval[233]" ), # 930 (0x2350, 4, 3800, "f", "floatval[234]" ), # 931 (0x2358, 4, 3804, "f", "floatval[235]" ), # 932 (0x2360, 4, 3808, "f", "floatval[236]" ), # 933 (0x2368, 4, 3812, "f", "floatval[237]" ), # 934 (0x2370, 4, 3816, "f", "floatval[238]" ), # 935 (0x2378, 4, 3820, "f", "floatval[239]" ), # 936 (0x2380, 4, 3824, "f", "floatval[240]" ), # 937 (0x2388, 4, 3828, "f", "floatval[241]" ), # 938 (0x2390, 4, 3832, "f", "floatval[242]" ), # 939 (0x2398, 4, 3836, "f", "floatval[243]" ), # 940 (0x23A0, 4, 3840, "f", "floatval[244]" ), # 941 (0x23A8, 4, 3844, "f", "floatval[245]" ), # 942 (0x23B0, 4, 3848, "f", "floatval[246]" ), # 943 (0x23B8, 4, 3852, "f", "floatval[247]" ), # 944 (0x23C0, 4, 3856, "f", "floatval[248]" ), # 945 (0x23C8, 4, 3860, "f", "floatval[249]" ), # 946 (0x23D0, 4, 3864, "f", "floatval[250]" ), # 947 (0x23D8, 4, 3868, "f", "floatval[251]" ), # 948 (0x23E0, 4, 3872, "f", "floatval[252]" ), # 949 (0x23E8, 4, 3876, "f", "floatval[253]" ), # 950 (0x23F0, 4, 3880, "f", "floatval[254]" ), # 951 (0x23F8, 4, 3884, "f", "floatval[255]" ), # 952 (0x2800, 4, 3888, "i", "errcode" ), # 953 (0x2802, 4, 3892, "i", "bTeachMode" ), # 954 (0x2804, 4, 3896, "i", "bSPILargeFrame" ), # 955 (0x2806, 4, 3900, "i", "spiio_type" ), # 956 (0x2808, 4, 3904, "i", "enc_type[0]" ), # 957 (0x2809, 4, 3908, "i", "enc_type[1]" ), # 958 (0x280A, 4, 3912, "i", "enc_type[2]" ), # 959 (0x280B, 4, 3916, "i", "enc_type[3]" ), # 960 (0x280C, 4, 3920, "i", "enc_type[4]" ), # 961 (0x280D, 4, 3924, "i", "enc_type[5]" ), # 962 (0x280E, 4, 3928, "i", "enc_type[6]" ), # 963 (0x280F, 4, 3932, "i", "enc_type[7]" ), # 964 (0x2810, 4, 3936, "i", "brk_type[0]" ), # 965 (0x2811, 4, 3940, "i", "brk_type[1]" ), # 966 (0x2812, 4, 3944, "i", "brk_type[2]" ), # 967 (0x2813, 4, 3948, "i", "brk_type[3]" ), # 968 (0x2814, 4, 3952, "i", "brk_type[4]" ), # 969 (0x2815, 4, 3956, "i", "brk_type[5]" ), # 970 (0x2816, 4, 3960, "i", "brk_type[6]" ), # 971 (0x2817, 4, 3964, "i", "brk_type[7]" ), # 972 (0x2C00, 36, 3968, "36s", "manip_type" 
), # 973 (0x2C24, 36, 4004, "36s", "manip_serial" ), # 974 (0x2C48, 4, 4040, "i", "format_version[0]" ), # 975 (0x2C4C, 4, 4044, "i", "format_version[1]" ), # 976 (0x2C50, 4, 4048, "i", "format_version[2]" ), # 977 (0x2C54, 4, 4052, "i", "parameter_version[0]" ), # 978 (0x2C58, 4, 4056, "i", "parameter_version[1]" ), # 979 (0x2C5C, 4, 4060, "i", "parameter_version[2]" ), # 980 (0x2C60, 4, 4064, "i", "user_home_sts" ), # 981 (0x2C64, 4, 4068, "i", "enc_type[0]" ), # 982 (0x2C65, 4, 4072, "i", "enc_type[1]" ), # 983 (0x2C66, 4, 4076, "i", "enc_type[2]" ), # 984 (0x2C67, 4, 4080, "i", "enc_type[3]" ), # 985 (0x2C68, 4, 4084, "i", "enc_type[4]" ), # 986 (0x2C69, 4, 4088, "i", "enc_type[5]" ), # 987 (0x2C6A, 4, 4092, "i", "enc_type[6]" ), # 988 (0x2C6B, 4, 4096, "i", "enc_type[7]" ), # 989 (0x2C6C, 4, 4100, "i", "brk_type[0]" ), # 990 (0x2C6D, 4, 4104, "i", "brk_type[1]" ), # 991 (0x2C6E, 4, 4108, "i", "brk_type[2]" ), # 992 (0x2C6F, 4, 4112, "i", "brk_type[3]" ), # 993 (0x2C70, 4, 4116, "i", "brk_type[4]" ), # 994 (0x2C71, 4, 4120, "i", "brk_type[5]" ), # 995 (0x2C72, 4, 4124, "i", "brk_type[6]" ), # 996 (0x2C73, 4, 4128, "i", "brk_type[7]" ), # 997 (0x2C74, 4, 4132, "i", "ecat_tmctl[0]" ), # 998 (0x2C75, 4, 4136, "i", "ecat_tmctl[1]" ), # 999 (0x2C76, 4, 4140, "i", "ecat_tmctl[2]" ), # 1000 (0x2C77, 4, 4144, "i", "ecat_tmctl[3]" ), # 1001 (0x2C78, 4, 4148, "i", "ecat_tmctl[4]" ), # 1002 (0x2C79, 4, 4152, "i", "ecat_tmctl[5]" ), # 1003 (0x2C7A, 4, 4156, "i", "ecat_tmctl[6]" ), # 1004 (0x2C7B, 4, 4160, "i", "ecat_tmctl[7]" ), # 1005 (0x2C7C, 4, 4164, "i", "cyclic_data_type[0]" ), # 1006 (0x2C7D, 4, 4168, "i", "cyclic_data_type[1]" ), # 1007 (0x2C7E, 4, 4172, "i", "cyclic_data_type[2]" ), # 1008 (0x2C7F, 4, 4176, "i", "cyclic_data_type[3]" ), # 1009 (0x2C80, 4, 4180, "i", "cyclic_data_type[4]" ), # 1010 (0x2C81, 4, 4184, "i", "cyclic_data_type[5]" ), # 1011 (0x2C82, 4, 4188, "i", "cyclic_data_type[6]" ), # 1012 (0x2C83, 4, 4192, "i", "cyclic_data_type[7]" ), # 1013 (0x2C84, 4, 4196, "i", "evt_ctrl_type[0]" ), # 1014 (0x2C85, 4, 4200, "i", "evt_ctrl_type[1]" ), # 1015 (0x2C86, 4, 4204, "i", "evt_ctrl_type[2]" ), # 1016 (0x2C87, 4, 4208, "i", "evt_ctrl_type[3]" ), # 1017 (0x2C88, 4, 4212, "i", "evt_ctrl_type[4]" ), # 1018 (0x2C89, 4, 4216, "i", "evt_ctrl_type[5]" ), # 1019 (0x2C8A, 4, 4220, "i", "evt_ctrl_type[6]" ), # 1020 (0x2C8B, 4, 4224, "i", "evt_ctrl_type[7]" ), # 1021 (0x2C8C, 4, 4228, "i", "vender_id[0]" ), # 1022 (0x2C90, 4, 4232, "i", "vender_id[1]" ), # 1023 (0x2C94, 4, 4236, "i", "vender_id[2]" ), # 1024 (0x2C98, 4, 4240, "i", "vender_id[3]" ), # 1025 (0x2C9C, 4, 4244, "i", "vender_id[4]" ), # 1026 (0x2CA0, 4, 4248, "i", "vender_id[5]" ), # 1027 (0x2CA4, 4, 4252, "i", "vender_id[6]" ), # 1028 (0x2CA8, 4, 4256, "i", "vender_id[7]" ), # 1029 (0x2CAC, 4, 4260, "i", "product_code[0]" ), # 1030 (0x2CB0, 4, 4264, "i", "product_code[1]" ), # 1031 (0x2CB4, 4, 4268, "i", "product_code[2]" ), # 1032 (0x2CB8, 4, 4272, "i", "product_code[3]" ), # 1033 (0x2CBC, 4, 4276, "i", "product_code[4]" ), # 1034 (0x2CC0, 4, 4280, "i", "product_code[5]" ), # 1035 (0x2CC4, 4, 4284, "i", "product_code[6]" ), # 1036 (0x2CC8, 4, 4288, "i", "product_code[7]" ), # 1037 (0x2CCC, 4, 4292, "i", "armio_vender_id" ), # 1038 (0x2CD0, 4, 4296, "i", "armio_product_code" ), # 1039 (0x2CD4, 4, 4300, "i", "crc_enabled[0]" ), # 1040 (0x2CD5, 4, 4304, "i", "crc_enabled[1]" ), # 1041 (0x2CD6, 4, 4308, "i", "crc_enabled[2]" ), # 1042 (0x2CD7, 4, 4312, "i", "crc_enabled[3]" ), # 1043 (0x2CD8, 4, 4316, "i", "crc_enabled[4]" ), # 
1044 (0x2CD9, 4, 4320, "i", "crc_enabled[5]" ), # 1045 (0x2CDA, 4, 4324, "i", "crc_enabled[6]" ), # 1046 (0x2CDB, 4, 4328, "i", "crc_enabled[7]" ), # 1047 (0x2CDC, 4, 4332, "i", "axis_type[0]" ), # 1048 (0x2CDD, 4, 4336, "i", "axis_type[1]" ), # 1049 (0x2CDE, 4, 4340, "i", "axis_type[2]" ), # 1050 (0x2CDF, 4, 4344, "i", "axis_type[3]" ), # 1051 (0x2CE0, 4, 4348, "i", "axis_type[4]" ), # 1052 (0x2CE1, 4, 4352, "i", "axis_type[5]" ), # 1053 (0x2CE2, 4, 4356, "i", "axis_type[6]" ), # 1054 (0x2CE3, 4, 4360, "i", "axis_type[7]" ), # 1055 (0x2CE4, 4, 4364, "i", "axis_type_get[0]" ), # 1056 (0x2CE5, 4, 4368, "i", "axis_type_get[1]" ), # 1057 (0x2CE6, 4, 4372, "i", "axis_type_get[2]" ), # 1058 (0x2CE7, 4, 4376, "i", "axis_type_get[3]" ), # 1059 (0x2CE8, 4, 4380, "i", "axis_type_get[4]" ), # 1060 (0x2CE9, 4, 4384, "i", "axis_type_get[5]" ), # 1061 (0x2CEA, 4, 4388, "i", "axis_type_get[6]" ), # 1062 (0x2CEB, 4, 4392, "i", "axis_type_get[7]" ), # 1063 (0x3000, 4, 4396, "f", "cmdx" ), # 1064 (0x3008, 4, 4400, "f", "cmdy" ), # 1065 (0x3010, 4, 4404, "f", "cmdz" ), # 1066 (0x3018, 4, 4408, "f", "cmdrz" ), # 1067 (0x3020, 4, 4412, "f", "cmdry" ), # 1068 (0x3028, 4, 4416, "f", "cmdrx" ), # 1069 (0x3040, 4, 4420, "i", "posture" ), # 1070 (0x3048, 4, 4424, "i", "singular" ), # 1071 (0x304C, 4, 4428, "i", "multiturn" ), # 1072 (0x3050, 4, 4432, "f", "joint[0]" ), # 1073 (0x3058, 4, 4436, "f", "joint[1]" ), # 1074 (0x3060, 4, 4440, "f", "joint[2]" ), # 1075 (0x3068, 4, 4444, "f", "joint[3]" ), # 1076 (0x3070, 4, 4448, "f", "joint[4]" ), # 1077 (0x3078, 4, 4452, "f", "joint[5]" ), # 1078 (0x3080, 4, 4456, "f", "joint[6]" ), # 1079 (0x3088, 4, 4460, "f", "joint[7]" ), # 1080 (0x3090, 4, 4464, "f", "velocity" ), # 1081 (0x3098, 4, 4468, "i", "vel_error_axes" ), # 1082 (0x309C, 4, 4472, "i", "softlimit" ), # 1083 (0x30A0, 4, 4476, "f", "joint_svon_to_svoff[0]"), # 1084 (0x30A8, 4, 4480, "f", "joint_svon_to_svoff[1]"), # 1085 (0x30B0, 4, 4484, "f", "joint_svon_to_svoff[2]"), # 1086 (0x30B8, 4, 4488, "f", "joint_svon_to_svoff[3]"), # 1087 (0x30C0, 4, 4492, "f", "joint_svon_to_svoff[4]"), # 1088 (0x30C8, 4, 4496, "f", "joint_svon_to_svoff[5]"), # 1089 (0x30D0, 4, 4500, "f", "joint_svon_to_svoff[6]"), # 1090 (0x30D8, 4, 4504, "f", "joint_svon_to_svoff[7]"), # 1091 (0x30E0, 4, 4508, "i", "b_saved" ), # 1092 (0x30E4, 4, 4512, "i", "toolno" ), # 1093 (0x30E8, 4, 4516, "f", "hdorgx" ), # 1094 (0x30F0, 4, 4520, "f", "hdorgy" ), # 1095 (0x30F8, 4, 4524, "f", "hdorgz" ), # 1096 (0x3100, 4, 4528, "f", "hdorgrz" ), # 1097 (0x3108, 4, 4532, "f", "hdorgry" ), # 1098 (0x3110, 4, 4536, "f", "hdorgrx" ), # 1099 (0x3128, 4, 4540, "f", "carte_svon_to_svoff[0]"), # 1100 (0x3130, 4, 4544, "f", "carte_svon_to_svoff[1]"), # 1101 (0x3138, 4, 4548, "f", "carte_svon_to_svoff[2]"), # 1102 (0x3140, 4, 4552, "f", "carte_svon_to_svoff[3]"), # 1103 (0x3148, 4, 4556, "f", "carte_svon_to_svoff[4]"), # 1104 (0x3150, 4, 4560, "f", "carte_svon_to_svoff[5]"), # 1105 (0x3158, 4, 4564, "f", "carte_svon_to_svoff[6]"), # 1106 (0x3160, 4, 4568, "f", "carte_svon_to_svoff[7]"), # 1107 (0x3168, 4, 4572, "i", "svon_to_svoff_posture" ), # 1108 (0x3170, 4, 4576, "i", "svon_to_svoff_singular"), # 1109 (0x3174, 4, 4580, "i", "svon_to_svoff_multiturn"), # 1110 (0x3178, 4, 4584, "i", "svon_to_svoff_toolno" ), # 1111 (0x317C, 4, 4588, "i", "bRequestHold" ), # 1112 (0x317E, 4, 4592, "i", "bRequestSuspend" ), # 1113 (0x3180, 4, 4596, "i", "bSuspended" ), # 1114 (0x3184, 4, 4600, "i", "permitted_worker_id" ), # 1115 (0x3188, 4, 4604, "f", "tool_org_params[0]" ), # 1116 
(0x3190, 4, 4608, "f", "tool_org_params[1]" ), # 1117 (0x3198, 4, 4612, "f", "tool_org_params[2]" ), # 1118 (0x31A0, 4, 4616, "f", "tool_org_params[3]" ), # 1119 (0x31A8, 4, 4620, "f", "tool_org_params[4]" ), # 1120 (0x31B0, 4, 4624, "f", "tool_org_params[5]" ), # 1121 (0x31B8, 4, 4628, "f", "tool_fwdmatrix[0]" ), # 1122 (0x31C0, 4, 4632, "f", "tool_fwdmatrix[1]" ), # 1123 (0x31C8, 4, 4636, "f", "tool_fwdmatrix[2]" ), # 1124 (0x31D0, 4, 4640, "f", "tool_fwdmatrix[3]" ), # 1125 (0x31D8, 4, 4644, "f", "tool_fwdmatrix[4]" ), # 1126 (0x31E0, 4, 4648, "f", "tool_fwdmatrix[5]" ), # 1127 (0x31E8, 4, 4652, "f", "tool_fwdmatrix[6]" ), # 1128 (0x31F0, 4, 4656, "f", "tool_fwdmatrix[7]" ), # 1129 (0x31F8, 4, 4660, "f", "tool_fwdmatrix[8]" ), # 1130 (0x3200, 4, 4664, "f", "tool_fwdmatrix[9]" ), # 1131 (0x3208, 4, 4668, "f", "tool_fwdmatrix[10]" ), # 1132 (0x3210, 4, 4672, "f", "tool_fwdmatrix[11]" ), # 1133 (0x3218, 4, 4676, "i", "last_hold_factor" ), # 1134 (0x3219, 4, 4680, "i", "vdesc0_sts" ), # 1135 (0x321A, 4, 4684, "i", "vdesc1_sts" ), # 1136 (0x321B, 4, 4688, "i", "n_queued" ), # 1137 (0x321C, 4, 4692, "i", "logical_cmd_pulse[0]" ), # 1138 (0x3220, 4, 4696, "i", "logical_cmd_pulse[1]" ), # 1139 (0x3224, 4, 4700, "i", "logical_cmd_pulse[2]" ), # 1140 (0x3228, 4, 4704, "i", "logical_cmd_pulse[3]" ), # 1141 (0x322C, 4, 4708, "i", "logical_cmd_pulse[4]" ), # 1142 (0x3230, 4, 4712, "i", "logical_cmd_pulse[5]" ), # 1143 (0x3234, 4, 4716, "i", "logical_cmd_pulse[6]" ), # 1144 (0x3238, 4, 4720, "i", "logical_cmd_pulse[7]" ), # 1145 (0x323C, 4, 4724, "i", "logical_fb_pulse[0]" ), # 1146 (0x3240, 4, 4728, "i", "logical_fb_pulse[1]" ), # 1147 (0x3244, 4, 4732, "i", "logical_fb_pulse[2]" ), # 1148 (0x3248, 4, 4736, "i", "logical_fb_pulse[3]" ), # 1149 (0x324C, 4, 4740, "i", "logical_fb_pulse[4]" ), # 1150 (0x3250, 4, 4744, "i", "logical_fb_pulse[5]" ), # 1151 (0x3254, 4, 4748, "i", "logical_fb_pulse[6]" ), # 1152 (0x3258, 4, 4752, "i", "logical_fb_pulse[7]" ), # 1153 (0x325C, 4, 4756, "i", "holdinfo" ), # 1154 (0x3260, 4, 4760, "i", "svsts" ), # 1155 (0x3264, 4, 4764, "i", "manip_pwr" ), # 1156 (0x3266, 4, 4768, "i", "ems" ), # 1157 (0x3268, 4, 4772, "i", "vdesc0_mvid" ), # 1158 (0x326C, 4, 4776, "i", "vdesc1_mvid" ), # 1159 (0x3270, 4, 4780, "i", "linkvo" ), # 1160 (0x3274, 4, 4784, "i", "system_status" ), # Information written by the PC; unrelated to the Shared Memory Update Process ) ZeusShmSize: int = 4784 #eof
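Each tuple in the table above appears to be (address, size in bytes, byte offset into the shared-memory block, struct format code, field name). A minimal sketch of reading one field from a raw snapshot of that block follows; the helper name and the little-endian layout are my assumptions, not part of the package:

```python
import struct

def read_field(buf: bytes, table, name: str):
    # Hypothetical helper: find a field by name and unpack it at its byte
    # offset. Codes: "i" = int32, "f" = float32, "36s" = 36-byte string;
    # "<" (little-endian, no padding) is an assumption, not from the source.
    for _addr, _size, offset, fmt, field in table:
        if field == name:
            return struct.unpack_from("<" + fmt, buf, offset)[0]
    raise KeyError(name)
```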
zeusrobot
/_zeusrobot/zeus_shared_memory/zeus_shared_memory_table.py
zeus_shared_memory_table.py
_shm_num_data = 1161 _shm_format=[ "i", # 0 "i", # 1 "Q", # 2 "Q", # 3 "Q", # 4 "i", # 5 "i", # 6 "i", # 7 "i", # 8 "i", # 9 "i", # 10 "i", # 11 "i", # 12 "i", # 13 "i", # 14 "i", # 15 "i", # 16 "i", # 17 "i", # 18 "i", # 19 "i", # 20 "i", # 21 "i", # 22 "i", # 23 "i", # 24 "i", # 25 "i", # 26 "i", # 27 "i", # 28 "i", # 29 "i", # 30 "i", # 31 "i", # 32 "i", # 33 "i", # 34 "i", # 35 "i", # 36 "i", # 37 "i", # 38 "i", # 39 "i", # 40 "i", # 41 "i", # 42 "i", # 43 "i", # 44 "i", # 45 "i", # 46 "i", # 47 "i", # 48 "i", # 49 "i", # 50 "i", # 51 "i", # 52 "i", # 53 "i", # 54 "i", # 55 "i", # 56 "i", # 57 "i", # 58 "i", # 59 "i", # 60 "i", # 61 "i", # 62 "i", # 63 "i", # 64 "i", # 65 "i", # 66 "i", # 67 "i", # 68 "i", # 69 "i", # 70 "i", # 71 "i", # 72 "i", # 73 "i", # 74 "i", # 75 "i", # 76 "i", # 77 "i", # 78 "i", # 79 "i", # 80 "i", # 81 "i", # 82 "i", # 83 "i", # 84 "i", # 85 "i", # 86 "i", # 87 "i", # 88 "i", # 89 "i", # 90 "i", # 91 "i", # 92 "i", # 93 "i", # 94 "i", # 95 "i", # 96 "i", # 97 "i", # 98 "i", # 99 "i", # 100 "i", # 101 "i", # 102 "i", # 103 "i", # 104 "i", # 105 "i", # 106 "i", # 107 "i", # 108 "i", # 109 "i", # 110 "i", # 111 "i", # 112 "i", # 113 "i", # 114 "i", # 115 "i", # 116 "i", # 117 "i", # 118 "i", # 119 "i", # 120 "i", # 121 "i", # 122 "i", # 123 "i", # 124 "i", # 125 "i", # 126 "i", # 127 "i", # 128 "i", # 129 "i", # 130 "i", # 131 "i", # 132 "i", # 133 "i", # 134 "i", # 135 "i", # 136 "i", # 137 "i", # 138 "i", # 139 "i", # 140 "i", # 141 "i", # 142 "i", # 143 "i", # 144 "i", # 145 "i", # 146 "i", # 147 "i", # 148 "i", # 149 "i", # 150 "i", # 151 "i", # 152 "i", # 153 "i", # 154 "i", # 155 "i", # 156 "i", # 157 "i", # 158 "i", # 159 "i", # 160 "i", # 161 "i", # 162 "i", # 163 "i", # 164 "i", # 165 "i", # 166 "i", # 167 "i", # 168 "i", # 169 "i", # 170 "i", # 171 "i", # 172 "i", # 173 "i", # 174 "i", # 175 "i", # 176 "i", # 177 "i", # 178 "i", # 179 "i", # 180 "i", # 181 "i", # 182 "i", # 183 "i", # 184 "i", # 185 "i", # 186 "i", # 187 "i", # 188 "i", # 189 "i", # 190 "i", # 191 "i", # 192 "i", # 193 "i", # 194 "i", # 195 "i", # 196 "i", # 197 "i", # 198 "i", # 199 "i", # 200 "i", # 201 "i", # 202 "i", # 203 "i", # 204 "i", # 205 "i", # 206 "i", # 207 "i", # 208 "i", # 209 "i", # 210 "i", # 211 "i", # 212 "i", # 213 "i", # 214 "i", # 215 "i", # 216 "i", # 217 "i", # 218 "i", # 219 "i", # 220 "i", # 221 "i", # 222 "i", # 223 "i", # 224 "i", # 225 "i", # 226 "i", # 227 "i", # 228 "i", # 229 "i", # 230 "i", # 231 "i", # 232 "i", # 233 "i", # 234 "i", # 235 "i", # 236 "i", # 237 "i", # 238 "i", # 239 "i", # 240 "i", # 241 "i", # 242 "i", # 243 "i", # 244 "i", # 245 "i", # 246 "i", # 247 "i", # 248 "i", # 249 "i", # 250 "i", # 251 "i", # 252 "i", # 253 "i", # 254 "i", # 255 "i", # 256 "i", # 257 "i", # 258 "i", # 259 "i", # 260 "i", # 261 "i", # 262 "i", # 263 "i", # 264 "i", # 265 "i", # 266 "i", # 267 "i", # 268 "i", # 269 "i", # 270 "i", # 271 "i", # 272 "i", # 273 "i", # 274 "i", # 275 "i", # 276 "i", # 277 "i", # 278 "i", # 279 "i", # 280 "i", # 281 "i", # 282 "i", # 283 "i", # 284 "i", # 285 "i", # 286 "i", # 287 "i", # 288 "i", # 289 "i", # 290 "i", # 291 "i", # 292 "i", # 293 "i", # 294 "i", # 295 "i", # 296 "i", # 297 "i", # 298 "i", # 299 "i", # 300 "i", # 301 "i", # 302 "i", # 303 "i", # 304 "i", # 305 "i", # 306 "i", # 307 "i", # 308 "i", # 309 "i", # 310 "i", # 311 "i", # 312 "i", # 313 "i", # 314 "i", # 315 "i", # 316 "i", # 317 "i", # 318 "i", # 319 "i", # 320 "i", # 321 "i", # 322 "i", # 323 "i", # 324 "i", # 325 "i", # 326 "i", # 327 "i", # 328 "i", # 329 
"i", # 330 "i", # 331 "i", # 332 "i", # 333 "i", # 334 "i", # 335 "i", # 336 "i", # 337 "i", # 338 "i", # 339 "i", # 340 "i", # 341 "i", # 342 "i", # 343 "i", # 344 "i", # 345 "i", # 346 "i", # 347 "i", # 348 "i", # 349 "i", # 350 "i", # 351 "i", # 352 "i", # 353 "i", # 354 "i", # 355 "i", # 356 "i", # 357 "i", # 358 "i", # 359 "i", # 360 "i", # 361 "i", # 362 "i", # 363 "i", # 364 "i", # 365 "i", # 366 "i", # 367 "i", # 368 "i", # 369 "i", # 370 "i", # 371 "i", # 372 "i", # 373 "i", # 374 "i", # 375 "i", # 376 "i", # 377 "i", # 378 "i", # 379 "i", # 380 "i", # 381 "i", # 382 "i", # 383 "i", # 384 "i", # 385 "i", # 386 "i", # 387 "i", # 388 "i", # 389 "i", # 390 "i", # 391 "i", # 392 "i", # 393 "i", # 394 "i", # 395 "i", # 396 "i", # 397 "i", # 398 "i", # 399 "i", # 400 "i", # 401 "i", # 402 "i", # 403 "i", # 404 "i", # 405 "i", # 406 "i", # 407 "i", # 408 "i", # 409 "i", # 410 "i", # 411 "i", # 412 "i", # 413 "i", # 414 "i", # 415 "i", # 416 "i", # 417 "f", # 418 "f", # 419 "f", # 420 "f", # 421 "f", # 422 "f", # 423 "f", # 424 "f", # 425 "36s", # 426 "36s", # 427 "i", # 428 "i", # 429 "i", # 430 "i", # 431 "i", # 432 "i", # 433 "i", # 434 "i", # 435 "i", # 436 "i", # 437 "i", # 438 "i", # 439 "i", # 440 "i", # 441 "i", # 442 "i", # 443 "i", # 444 "i", # 445 "i", # 446 "i", # 447 "i", # 448 "i", # 449 "i", # 450 "i", # 451 "i", # 452 "i", # 453 "i", # 454 "i", # 455 "i", # 456 "i", # 457 "i", # 458 "i", # 459 "i", # 460 "i", # 461 "i", # 462 "i", # 463 "i", # 464 "i", # 465 "i", # 466 "i", # 467 "i", # 468 "i", # 469 "i", # 470 "i", # 471 "i", # 472 "i", # 473 "i", # 474 "i", # 475 "i", # 476 "i", # 477 "i", # 478 "i", # 479 "i", # 480 "i", # 481 "i", # 482 "i", # 483 "i", # 484 "i", # 485 "i", # 486 "i", # 487 "i", # 488 "i", # 489 "i", # 490 "i", # 491 "i", # 492 "i", # 493 "i", # 494 "i", # 495 "i", # 496 "i", # 497 "i", # 498 "i", # 499 "i", # 500 "i", # 501 "i", # 502 "i", # 503 "i", # 504 "i", # 505 "i", # 506 "i", # 507 "i", # 508 "i", # 509 "i", # 510 "i", # 511 "i", # 512 "i", # 513 "i", # 514 "i", # 515 "i", # 516 "i", # 517 "i", # 518 "i", # 519 "i", # 520 "i", # 521 "i", # 522 "i", # 523 "i", # 524 "i", # 525 "i", # 526 "i", # 527 "i", # 528 "i", # 529 "i", # 530 "i", # 531 "i", # 532 "i", # 533 "i", # 534 "i", # 535 "i", # 536 "i", # 537 "i", # 538 "i", # 539 "i", # 540 "i", # 541 "i", # 542 "i", # 543 "i", # 544 "i", # 545 "i", # 546 "i", # 547 "i", # 548 "i", # 549 "i", # 550 "i", # 551 "i", # 552 "i", # 553 "i", # 554 "i", # 555 "i", # 556 "i", # 557 "i", # 558 "i", # 559 "i", # 560 "i", # 561 "i", # 562 "i", # 563 "i", # 564 "i", # 565 "i", # 566 "i", # 567 "i", # 568 "i", # 569 "i", # 570 "i", # 571 "i", # 572 "i", # 573 "i", # 574 "i", # 575 "i", # 576 "i", # 577 "i", # 578 "i", # 579 "i", # 580 "i", # 581 "i", # 582 "i", # 583 "i", # 584 "i", # 585 "i", # 586 "i", # 587 "i", # 588 "i", # 589 "i", # 590 "i", # 591 "i", # 592 "i", # 593 "i", # 594 "i", # 595 "i", # 596 "i", # 597 "i", # 598 "i", # 599 "i", # 600 "i", # 601 "i", # 602 "i", # 603 "i", # 604 "i", # 605 "i", # 606 "i", # 607 "i", # 608 "i", # 609 "i", # 610 "i", # 611 "i", # 612 "i", # 613 "i", # 614 "i", # 615 "i", # 616 "i", # 617 "i", # 618 "i", # 619 "i", # 620 "i", # 621 "i", # 622 "i", # 623 "i", # 624 "i", # 625 "i", # 626 "i", # 627 "i", # 628 "i", # 629 "i", # 630 "i", # 631 "i", # 632 "i", # 633 "i", # 634 "i", # 635 "i", # 636 "i", # 637 "i", # 638 "i", # 639 "i", # 640 "i", # 641 "i", # 642 "i", # 643 "i", # 644 "i", # 645 "i", # 646 "i", # 647 "i", # 648 "i", # 649 "i", # 650 "i", # 651 "i", # 
652 "i", # 653 "i", # 654 "i", # 655 "i", # 656 "i", # 657 "i", # 658 "i", # 659 "i", # 660 "i", # 661 "i", # 662 "i", # 663 "i", # 664 "i", # 665 "i", # 666 "i", # 667 "i", # 668 "i", # 669 "i", # 670 "i", # 671 "i", # 672 "i", # 673 "i", # 674 "i", # 675 "i", # 676 "i", # 677 "i", # 678 "i", # 679 "i", # 680 "i", # 681 "i", # 682 "i", # 683 "i", # 684 "i", # 685 "i", # 686 "i", # 687 "i", # 688 "i", # 689 "i", # 690 "i", # 691 "i", # 692 "i", # 693 "i", # 694 "i", # 695 "i", # 696 "f", # 697 "f", # 698 "f", # 699 "f", # 700 "f", # 701 "f", # 702 "f", # 703 "f", # 704 "f", # 705 "f", # 706 "f", # 707 "f", # 708 "f", # 709 "f", # 710 "f", # 711 "f", # 712 "f", # 713 "f", # 714 "f", # 715 "f", # 716 "f", # 717 "f", # 718 "f", # 719 "f", # 720 "f", # 721 "f", # 722 "f", # 723 "f", # 724 "f", # 725 "f", # 726 "f", # 727 "f", # 728 "f", # 729 "f", # 730 "f", # 731 "f", # 732 "f", # 733 "f", # 734 "f", # 735 "f", # 736 "f", # 737 "f", # 738 "f", # 739 "f", # 740 "f", # 741 "f", # 742 "f", # 743 "f", # 744 "f", # 745 "f", # 746 "f", # 747 "f", # 748 "f", # 749 "f", # 750 "f", # 751 "f", # 752 "f", # 753 "f", # 754 "f", # 755 "f", # 756 "f", # 757 "f", # 758 "f", # 759 "f", # 760 "f", # 761 "f", # 762 "f", # 763 "f", # 764 "f", # 765 "f", # 766 "f", # 767 "f", # 768 "f", # 769 "f", # 770 "f", # 771 "f", # 772 "f", # 773 "f", # 774 "f", # 775 "f", # 776 "f", # 777 "f", # 778 "f", # 779 "f", # 780 "f", # 781 "f", # 782 "f", # 783 "f", # 784 "f", # 785 "f", # 786 "f", # 787 "f", # 788 "f", # 789 "f", # 790 "f", # 791 "f", # 792 "f", # 793 "f", # 794 "f", # 795 "f", # 796 "f", # 797 "f", # 798 "f", # 799 "f", # 800 "f", # 801 "f", # 802 "f", # 803 "f", # 804 "f", # 805 "f", # 806 "f", # 807 "f", # 808 "f", # 809 "f", # 810 "f", # 811 "f", # 812 "f", # 813 "f", # 814 "f", # 815 "f", # 816 "f", # 817 "f", # 818 "f", # 819 "f", # 820 "f", # 821 "f", # 822 "f", # 823 "f", # 824 "f", # 825 "f", # 826 "f", # 827 "f", # 828 "f", # 829 "f", # 830 "f", # 831 "f", # 832 "f", # 833 "f", # 834 "f", # 835 "f", # 836 "f", # 837 "f", # 838 "f", # 839 "f", # 840 "f", # 841 "f", # 842 "f", # 843 "f", # 844 "f", # 845 "f", # 846 "f", # 847 "f", # 848 "f", # 849 "f", # 850 "f", # 851 "f", # 852 "f", # 853 "f", # 854 "f", # 855 "f", # 856 "f", # 857 "f", # 858 "f", # 859 "f", # 860 "f", # 861 "f", # 862 "f", # 863 "f", # 864 "f", # 865 "f", # 866 "f", # 867 "f", # 868 "f", # 869 "f", # 870 "f", # 871 "f", # 872 "f", # 873 "f", # 874 "f", # 875 "f", # 876 "f", # 877 "f", # 878 "f", # 879 "f", # 880 "f", # 881 "f", # 882 "f", # 883 "f", # 884 "f", # 885 "f", # 886 "f", # 887 "f", # 888 "f", # 889 "f", # 890 "f", # 891 "f", # 892 "f", # 893 "f", # 894 "f", # 895 "f", # 896 "f", # 897 "f", # 898 "f", # 899 "f", # 900 "f", # 901 "f", # 902 "f", # 903 "f", # 904 "f", # 905 "f", # 906 "f", # 907 "f", # 908 "f", # 909 "f", # 910 "f", # 911 "f", # 912 "f", # 913 "f", # 914 "f", # 915 "f", # 916 "f", # 917 "f", # 918 "f", # 919 "f", # 920 "f", # 921 "f", # 922 "f", # 923 "f", # 924 "f", # 925 "f", # 926 "f", # 927 "f", # 928 "f", # 929 "f", # 930 "f", # 931 "f", # 932 "f", # 933 "f", # 934 "f", # 935 "f", # 936 "f", # 937 "f", # 938 "f", # 939 "f", # 940 "f", # 941 "f", # 942 "f", # 943 "f", # 944 "f", # 945 "f", # 946 "f", # 947 "f", # 948 "f", # 949 "f", # 950 "f", # 951 "f", # 952 "i", # 953 "i", # 954 "i", # 955 "i", # 956 "i", # 957 "i", # 958 "i", # 959 "i", # 960 "i", # 961 "i", # 962 "i", # 963 "i", # 964 "i", # 965 "i", # 966 "i", # 967 "i", # 968 "i", # 969 "i", # 970 "i", # 971 "i", # 972 "36s", # 973 "36s", # 974 "i", 
# 975 "i", # 976 "i", # 977 "i", # 978 "i", # 979 "i", # 980 "i", # 981 "i", # 982 "i", # 983 "i", # 984 "i", # 985 "i", # 986 "i", # 987 "i", # 988 "i", # 989 "i", # 990 "i", # 991 "i", # 992 "i", # 993 "i", # 994 "i", # 995 "i", # 996 "i", # 997 "i", # 998 "i", # 999 "i", # 1000 "i", # 1001 "i", # 1002 "i", # 1003 "i", # 1004 "i", # 1005 "i", # 1006 "i", # 1007 "i", # 1008 "i", # 1009 "i", # 1010 "i", # 1011 "i", # 1012 "i", # 1013 "i", # 1014 "i", # 1015 "i", # 1016 "i", # 1017 "i", # 1018 "i", # 1019 "i", # 1020 "i", # 1021 "i", # 1022 "i", # 1023 "i", # 1024 "i", # 1025 "i", # 1026 "i", # 1027 "i", # 1028 "i", # 1029 "i", # 1030 "i", # 1031 "i", # 1032 "i", # 1033 "i", # 1034 "i", # 1035 "i", # 1036 "i", # 1037 "i", # 1038 "i", # 1039 "i", # 1040 "i", # 1041 "i", # 1042 "i", # 1043 "i", # 1044 "i", # 1045 "i", # 1046 "i", # 1047 "i", # 1048 "i", # 1049 "i", # 1050 "i", # 1051 "i", # 1052 "i", # 1053 "i", # 1054 "i", # 1055 "i", # 1056 "i", # 1057 "i", # 1058 "i", # 1059 "i", # 1060 "i", # 1061 "i", # 1062 "i", # 1063 "f", # 1064 "f", # 1065 "f", # 1066 "f", # 1067 "f", # 1068 "f", # 1069 "i", # 1070 "i", # 1071 "i", # 1072 "f", # 1073 "f", # 1074 "f", # 1075 "f", # 1076 "f", # 1077 "f", # 1078 "f", # 1079 "f", # 1080 "f", # 1081 "i", # 1082 "i", # 1083 "f", # 1084 "f", # 1085 "f", # 1086 "f", # 1087 "f", # 1088 "f", # 1089 "f", # 1090 "f", # 1091 "i", # 1092 "i", # 1093 "f", # 1094 "f", # 1095 "f", # 1096 "f", # 1097 "f", # 1098 "f", # 1099 "f", # 1100 "f", # 1101 "f", # 1102 "f", # 1103 "f", # 1104 "f", # 1105 "f", # 1106 "f", # 1107 "i", # 1108 "i", # 1109 "i", # 1110 "i", # 1111 "i", # 1112 "i", # 1113 "i", # 1114 "i", # 1115 "f", # 1116 "f", # 1117 "f", # 1118 "f", # 1119 "f", # 1120 "f", # 1121 "f", # 1122 "f", # 1123 "f", # 1124 "f", # 1125 "f", # 1126 "f", # 1127 "f", # 1128 "f", # 1129 "f", # 1130 "f", # 1131 "f", # 1132 "f", # 1133 "i", # 1134 "i", # 1135 "i", # 1136 "i", # 1137 "i", # 1138 "i", # 1139 "i", # 1140 "i", # 1141 "i", # 1142 "i", # 1143 "i", # 1144 "i", # 1145 "i", # 1146 "i", # 1147 "i", # 1148 "i", # 1149 "i", # 1150 "i", # 1151 "i", # 1152 "i", # 1153 "i", # 1154 "i", # 1155 "i", # 1156 "i", # 1157 "i", # 1158 "i", # 1159 "i", # 1160 ]
zeusrobot
/_zeusrobot/zeus_shared_memory/zeus_shared_memory_format.py
zeus_shared_memory_format.py
# Zeus the Investigator

![ZeusTheInvestigator0.1.7-preview](https://user-images.githubusercontent.com/76993204/170004945-e1ad079f-d1eb-46f5-9da2-51bd452bf635.gif)

#### **USAGE**

```powershell
$ zeus -u <first_url> -u <second_url> -c <cooldown (in secs, default=3)>
```

# Installation

## Manual Installation

**Note:** *You will need poetry for manual installation.*

```bash
$ pip install poetry
```

1. Download or clone the repository.

   ```bash
   $ git clone https://github.com/777advait/Zeus-The-Investigator
   ```

2. Install the project by running the following command in the root of the directory.

   ```bash
   $ poetry install
   ```

## PyPI Installation

```bash
$ pip install ZeusTheInvestigator
```
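`checker.py` (shown below) also defines an `-i`/`--infinite` flag that keeps re-checking the URLs until you abort with CTRL+C; for example:

```powershell
$ zeus -u <first_url> -c 5 -i
```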
zeustheinvestigator
/ZeusTheInvestigator-0.1.7.tar.gz/ZeusTheInvestigator-0.1.7/README.md
README.md
import urllib3
from typing import List, Optional
from rich.live import Live
from rich.table import Table
from rich.console import Console
import typer
import time
import re
import requests

console = Console()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def check_site(url_list: List[str]):
    """Main function to check the status of websites."""
    # Configure the results table.
    table = Table()
    table.add_column("WEBSITE", justify="center")
    table.add_column("STATUS", justify="center")
    table.add_column("Last Request", justify="center")

    # Validate the URLs.
    url_list = [validate_url(urls) for urls in url_list]
    for urls in url_list:
        try:
            requests.get(url=urls, timeout=2.5, verify=False)
            table.add_row(urls, "[green]ONLINE", f"{time.strftime('%H:%M:%S', time.localtime())}")
        except requests.exceptions.ConnectionError:
            # requests raises ConnectionError when the site is unreachable.
            table.add_row(urls, "[red]OFFLINE", f"{time.strftime('%H:%M:%S', time.localtime())}")
    return table


def validate_url(url: str):
    """Prepends the "http" scheme to a URL if no scheme is present."""
    pattern = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
    if not re.match(pattern, url):
        return "http://" + url
    return url


def process_url(
    url: List[str] = typer.Option(..., "-u", "--url", help="URL of the site that needs to be checked."),
    cooldown: int = typer.Option(3, "-c", "--cooldown", help="Cooldown for the next request."),
    infinite: Optional[bool] = typer.Option(False, "-i", "--infinite", help="Runs an infinite loop until the user aborts it.")
):
    """Zeus the Investigator: Checks if a site is online at the moment."""
    if infinite:
        with Live(check_site(url), refresh_per_second=4) as live:
            live.console.print("\nInvestigator initiated! (Press [bold blue]CTRL+C[/] to quit)\n")
            while True:
                time.sleep(cooldown)
                live.update(check_site(url))
    else:
        console.print(check_site(url))
        console.print("\nInvestigation completed :thumbsup:\n")


def main():
    typer.run(process_url)
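A quick illustration of `validate_url` above: the regex only matches strings that already look like full URLs (a scheme, `www.`, or a domain followed by `/`), so bare hostnames get an `http://` prefix. The example values are mine:

```python
validate_url("example.com")           # -> "http://example.com"
validate_url("https://example.com/")  # matches the pattern, returned unchanged
```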
zeustheinvestigator
/ZeusTheInvestigator-0.1.7.tar.gz/ZeusTheInvestigator-0.1.7/ZeusTheInvestigator/checker.py
checker.py
# information about the developer __author__ = "val (zevtyardt)" __version__ = "0.0.4" __description__ = "Python hash cracker" __github__ = "https://github.com/zevtyardt" __email__ = "xnver404[at]gmail[dot]com" # importing module from passlib.hash import mysql323, mysql41, lmhash, nthash from hashlib import * import time import re import argparse import os import requests import sys # lambda function tampil = lambda s,info="info": print(f" [{time.strftime('%H:%M:%S')}] [{info.upper()}] {s}") prog = lambda s,info="info": print(f" [{time.strftime('%H:%M:%S')}] [{info.upper()}] {s} ",end="\r") elapsed = lambda start_time : print(f" Elapsed time {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}\n") # hash algorithms hashlib_hash_algos = list(i for i in algorithms_guaranteed if "shake" not in i) passlib_hash_algos = { "mysql323":mysql323.hash, "mysql41":mysql41.hash, "lmhash":lmhash.hash, "nthash":nthash.hash } __all__ = ["md5md5"]+[i for i in algorithms_available if "shake" not in i or i == "blake2s256" or i == "sha3-256"]+[i for i in passlib_hash_algos] # result data final = {} # ------------------ the beginning of all functions ---------------- # hashing string def hashing(): print ('') # new line for type in __all__: sys.stdout.write(type+':'+('\t'*2)) if len(type) <= 6 else sys.stdout.write(type+':\t') if type in list(o for o in algorithms_available if "shake" not in o): m = new(type) m.update(arg.string.encode()) print(m.hexdigest()) elif type == 'md5md5': obj1 = md5(arg.string.encode()).hexdigest() print(md5(obj1.encode()).hexdigest()) else: print(passlib_hash_algos[type](arg.string.encode())) print ('') # new line # verify hash type class verify: def __init__(self,hash_to_verify): def build_re(hex_len, prefix=r"", suffix=r"(:.+)?"): regex_string = r"^{}[a-f0-9]{{{}}}{}$".format(prefix, hex_len, suffix) return re.compile(regex_string, re.IGNORECASE) self.hash_to_verify = hash_to_verify self.HASH_TYPE_REGEX = { build_re(32, prefix="(md5)?"): [ "md5md5", "md5", "md4", "nthash", "lmhash", "mdc2" ], build_re(16, prefix="(0x)?", suffix="(L)?"): [ "mysql323" ], build_re(64): [ "sha256", "sha3_256", "sm3", "sha512-256" ], build_re(128): [ "sha512", "whirlpool", "sha3_512", "blake2b512", "blake2b" ], build_re(56, suffix=""): [ "sha224", "sha3_224" ], build_re(40): [ "sha1", "ripemd160" ], build_re(96, suffix="", prefix="(0x0100)?"): [ "sha384", "sha3_384" ], build_re(40, prefix=r"\*", suffix=""): [ "mysql41" ], } self.verify_hash_type() def verify_hash_type(self): """verify the type of hash using regex""" for regex, hash_types in self.HASH_TYPE_REGEX.items(): if regex.match(self.hash_to_verify) or regex.match("*"+self.hash_to_verify): final[self.hash_to_verify] = hash_types # create new wordlist class create_new_wordlist: def __init__(self): self.hashes = [i for i in hashes] def _create(self): """make your own internal word list (success rate of ± 70%)""" words = [] for numhash,hash in enumerate(self.hashes): response = requests.get(f"http://www.google.com/search?q={hash}").text list_temp = response.replace(".", " ").replace(":", " ").replace("?", "").replace("(", " ").replace(""", " ").replace("""," ").replace("“"," ").replace("”"," ").replace(","," ").replace(";"," ").replace("="," ").replace(">"," ").replace("<"," ").replace("/"," ").replace(")"," ").replace("{"," ").replace("}"," ").replace("&"," ").replace("-"," ").replace("_"," ").replace("%3F"," ").replace("%26"," ").replace("%3D"," ").replace("%2B","+").replace("#"," ").replace("]"," ").replace("["," ").split() for 
numtemp,word in enumerate(list_temp): print (f" [{time.strftime('%H:%M:%S')}] [INFO] line:{numhash+1}/{len(self.hashes)} | percent: {int(((numtemp+1) / len(list_temp)) * 100)}% ({int(((numhash+1) / len(self.hashes)) * 100)}%) | total: {len(words)} ",end="\r") if word not in words: words.append(word) print ("") # new line tampil(f"Total words generated: {len(words)} words.") return words # main function class zevcrack: def __init__(self,hash,type): self.func = { "sha256":sha256, "md5":md5, "sha3_384":sha3_384, "sha512":sha512, "sha224":sha224, "blake2s":blake2s, "sha3_224":sha3_224, "sha1":sha1, "shake_128":shake_128, "sha384":sha384, "sha3_256":sha3_256, "blake2b":blake2b, "sha3_512":sha3_512 } self.bin2hex = lambda s: "".join(hex(ord(i)) for i in s).replace("0x","") self.hash = hash self.type = type def mode1(self): for num,i in enumerate(wordlist): if (self.func[self.type](i.encode()).hexdigest()) == self.hash: return self._found(i,num+1,self.type,self.hash) return None def mode2(self): hash_ = ("*"+self.hash) if self.type == "mysql41" else self.hash for num,i in enumerate(wordlist): if passlib_hash_algos[self.type](i.encode()) == hash_: return self._found(i,num+1,self.type,hash_) def mode3(self): for num,i in enumerate(wordlist): h = new(self.type) h.update(i.encode()) if (h.hexdigest()) == self.hash: return self._found(i,num+1,self.type,self.hash) def double(self): for num,i in enumerate(wordlist): obj1 = self.func[self.type[:3]](i.encode()).hexdigest() if self.func[self.type[:3]](obj1.encode()).hexdigest() == self.hash: return self._found(i,num+1,self.type,self.hash) def _found(self,plaintext,line,type,hash): space = ("(space)" if plaintext == " " else "") return (f"\n\n * Clear text:\t\t{plaintext} (0x{self.bin2hex(plaintext)}) {space}\n * tries attempted:\t{line}\n * Hash:\t\t{hash}\n * Algorithm used:\t{type}\n") def _start(types,hash): for type in types: prog(f"algorithm: {type} ") if type in hashlib_hash_algos: obj = [zevcrack(hash,type).mode1()] if obj[0]: return (obj[0]) elif type == "md5md5": obj = [zevcrack(hash,type).double()] if obj[0]: return (obj[0]) elif type in passlib_hash_algos: obj = [zevcrack(hash,type).mode2()] if obj[0]: return (obj[0]) else: obj = [zevcrack(hash,type).mode3()] if obj[0]: return (obj[0]) return (f"\n\n * Status:\t\tNOT FOUND\n * Hash:\t\t{hash}\n") # ---------------------- end of all funtions -------------------- # def main(): global arg, hashes, wordlist parse = argparse.ArgumentParser(usage="python %(prog)s <arguments>",epilog="** if the hash type is more than one. 
use separator \",\" **") parse.add_argument("-c",metavar="<hash>",dest="hash",help="Specify a hash to crack.") parse.add_argument("-l",metavar="<file-path>",dest="file",help="Provide a file of hashes to crack.") parse.add_argument("-w",metavar="<wordlist>",dest="wordlist",help="Wordlist or \"stdin\" for standard input.") parse.add_argument("-t",metavar="<hash type>",dest="type",help="The type of hash.") parse.add_argument("-o",metavar="<output file>",dest="output",help="Save session/results to file.") parse.add_argument('-d',metavar='<string>',dest='string',help='Hash <string> with all supported hash types.') parse.add_argument("--verify",action="store_true",dest="verify",help="Attempt to find the type of algorithm used by the hash.") parse.add_argument("--show-hash",action="store_true",dest="show",help="show all supported hash type.") parse.add_argument("--verbose",action="store_true",dest="verbose",help="Run the application verbosely.") parse.add_argument('--version',action='store_true',dest='version',help='Display the version information and exit.') arg = parse.parse_args() try: if arg.version: print (f'{__version__}');exit() if arg.string: hashing();exit() if arg.show: print ("\nSupported hash types:") for num,i in enumerate(__all__): if num == 0: sys.stdout.write("\t") sys.stdout.write(i+" ") if (num+1) % 4 == 0:sys.stdout.write("\n\t") print ("\n** Do not include the * in mysql41 hashes. **\n");exit() # new line and exit if arg.hash or arg.file: print (f" {__description__} v{__version__}: {__github__}\n") hashes = [i.strip() for i in open(arg.file,"r").readlines()] if arg.file else [arg.hash] if arg.verbose: tampil(f"Begin executing: {time.strftime('%c')}") if arg.output: tampil(f"Session file: {arg.output}") if arg.hash: tampil(f"Hash: {arg.hash}") elif arg.file: tampil(f"Hash file: {os.path.join(os.getcwd(),arg.file)}") # verify if arg.verify: tampil("Automatic hash type detection is activated.") if arg.file: tampil(f"Found a total of {len(hashes)} hashes to verify.") for hash in hashes: if arg.verbose: prog(f"Analizing hash: {hash[:25]}..") verify(hash) if arg.verbose: print ("") # new line if arg.hash and len(final) > 0: tampil(f"Use possible hash type: {final[arg.hash]}") elif arg.file and len(final) > 0: tampil(f"Can only analyze {len(final)} out of {len(hashes)} hashes.") else: tampil("Can\"t find an algorithm that is suitable.\n Aborting..\n");exit() elif arg.verify == False or arg.type == None: if arg.type == None: tampil("Hash type not entered.","warn") elif arg.verify == False: tampil("Automatic hash detection is not activated.","warn") if arg.type: for hash in hashes: type_temp = [] for type in arg.type.split(","): if type not in type_temp: type_temp.append(type) final[hash] = type_temp if arg.type: tampil(f"Use hash type: {arg.type.split(',')}") else: tampil("use all existing hash types..") for hash in hashes: final[hash] = __all__ if arg.wordlist == None: tampil(f"Wordlist file can\"t be found, create new wordlist.","warn") wordlist = create_new_wordlist()._create() elif arg.wordlist: tampil(f"Wordlist path: {os.path.join(os.getcwd(),arg.wordlist)}") wordlist = [i.strip() for i in open(arg.wordlist,"r").readlines()] tampil(f"{len(wordlist)} words loaded.") if len(wordlist) >= 5000: tampil("The wordlist that is used too much, maybe it will take a little longer.\n","warn") else: print ("") # new line # start cracking if len(hashes) > 0: tampil("start cracking with brute force method.") for hash,types in final.items(): start_time = time.time() result = [_start(types,hash)] 
if result: print(result[0]) if arg.output: output = open(arg.output,"a") output.write(f"\n # {' '*21}{time.strftime('%c')}\n{result[0][2:]}") if arg.wordlist: output.write(f' * In wordlist:\t\t{os.path.join(os.getcwd(),arg.wordlist)}\n') elapsed(start_time) if arg.verbose: tampil(f"Completed on: {time.strftime('%c')}\n") else: parse.print_help() except KeyboardInterrupt: print("\r \n Signal Interrupt caught.\n Aborting..\n") except Exception as e: print(f"\r \n {e}.\n Aborting..\n") if __name__ == '__main__': main()
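For reference, a hypothetical invocation of the CLI defined above; the hash value, hash type, wordlist, and session file names are made-up placeholders, and the exact entry point may differ depending on how zevcrack is installed:

```shell
# Crack a single hash with a known type, saving results to a session file.
python zevcrack.py -c 5f4dcc3b5aa765d61d8327deb882cf99 -t md5 -w wordlist.txt -o session.txt --verbose

# Let the tool guess the algorithm for every hash in a file.
python zevcrack.py -l hashes.txt --verify -w wordlist.txt
```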
zevcrack
/zevcrack-0.0.4.tar.gz/zevcrack-0.0.4/zevcrack.py
zevcrack.py
======= ZEvents ======= Easy to use generic events system. * Free software: MIT license * Documentation: https://zevents.readthedocs.io. Features -------- * EventManager class orchestrating subscriptions, unsubscriptions and notifications * Generic Event class usable to implement custom events * Generic TriggerEvent triggering the processing of queues in EventManager * Standard events collection: TickEvent, QuitEvent Install ------- .. code-block:: console $ pip install tchappui-zevents Example ------- A simple example of how to use ZEvents can be found in the `Usage section <https://zevents.readthedocs.io/en/latest/usage.html>`_ of the documentation. Credits ------- This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. .. _Cookiecutter: https://github.com/audreyr/cookiecutter .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
zevents
/zevents-0.2.9.tar.gz/zevents-0.2.9/README.rst
README.rst
.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/tchappui/zevents/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ ZEvents could always use more documentation, whether as part of the official ZEvents docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/tchappui/zevents/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `zevents` for local development. 1. Fork the `zevents` repo on GitHub. 2. Clone your fork locally:: $ git clone [email protected]:your_name_here/zevents.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv zevents $ cd zevents/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 zevents tests $ python setup.py test or py.test $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check https://travis-ci.org/tchappui/zevents/pull_requests and make sure that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: $ py.test tests.test_zevents Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bumpversion patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass.
zevents
/zevents-0.2.9.tar.gz/zevents-0.2.9/CONTRIBUTING.rst
CONTRIBUTING.rst
===== Usage ===== To use zevents in a project:: import zevents The zevents package defines two standard events that can be used to control an event-driven application: TickEvent and QuitEvent. TickEvent is meant to be sent in the main application loop to process queues in the internal event manager. Here is an example of how to use those events to control an echo console application using event-based logic. The KeyboardController class subscribes to Tick events, reacts by asking the user to enter a few words, and sends a Quit event if the user inputs `quit`:: from zevents import Event from zevents.dispatch import listener # We create events by subclassing the zevents.Event class class TickEvent(Event): pass class QuitEvent(Event): pass # For a class to be able to listen to zevents, decorate it as a listener @listener class KeyboardController: """Controller responsible for handling keyboard events.""" # Available actions actions = { "quit": QuitEvent.send, } @TickEvent.listen def _on_tick(self, event): """Handles the Tick events.""" user = input("Say something or enter quit: ") action = self.actions.get( user.lower().strip(), lambda: print(user) ) action() The EchoApplication class could be written as follows:: from zevents import Event from zevents.dispatch import Listener # For a class to be able to listen to zevents, you can also subclass Listener class EchoApplication(Listener): """Represents the application itself.""" def __init__(self): super().__init__() self.running = False self.controller = KeyboardController() @QuitEvent.listen def _on_quit(self, event): """Handles Quit events.""" self.running = False def run(self): """Starts the application event loop.""" self.running = True while self.running: # We send a Tick event in each loop TickEvent.send() Let's run this application:: >>> app = EchoApplication() >>> app.run()
zevents
/zevents-0.2.9.tar.gz/zevents-0.2.9/docs/usage.rst
usage.rst
.. highlight:: shell ============ Installation ============ Stable release -------------- To install zevents, run this command in your terminal: .. code-block:: console $ pip install tchappui-zevents or using pipenv: .. code-block:: console $ pipenv install zevents These are the preferred methods to install zevents, as they will always install the most recent stable release. If you have neither `pip`_ nor `pipenv`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _pipenv: https://pipenv.readthedocs.io/en/latest/install/ .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for zevents can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/tchappui/zevents Or download the `tarball`_: .. code-block:: console $ curl -OL https://github.com/tchappui/zevents/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/tchappui/zevents .. _tarball: https://github.com/tchappui/zevents/tarball/master
zevents
/zevents-0.2.9.tar.gz/zevents-0.2.9/docs/installation.rst
installation.rst
from datetime import datetime from enum import IntEnum import logging import httpx _LOGGER = logging.getLogger(__name__) # definition of values in string array (positions) as far as known # see also: https://github.com/solmoller/eversolar-monitor/issues/22 # 0, 1 - unknown # 2 - Registry ID - also MAC address # 3 - Registry Key # 4 - Hardware Version # 5 - Software version # 6 - Time and Date # 7 - Communication status with ZeverCloud # 8 - unknown # 9 - SN. # 10 - Pac(W) # 11 - E_Today(KWh) - attention! Has a bug. # 12 - Status # 13 - unknown # Attention: # - if you split the byte array time and date will be two array entries. # - E_Today(KWh) has a bug. class ArrayPosition(IntEnum): """Defines the value position in the data array.""" unknown0 = 0 unknown1 = 1 registry_id = 2 registry_key = 3 hardware_version = 4 software_version = 5 date_and_time = 6 communication_status = 7 unknown8 = 8 serial_number = 9 pac_watt = 10 energy_today_KWh = 11 status = 12 unknown13 = 13 class ZeversolarError(Exception): """General problem. Possible causes: - The data stream is not as expected. This can sometimes be seen if the inverter tries to reconnect. """ class ZeversolarTimeout(ZeversolarError): """The inverter cannot be reached. Possible causes: - inverter is off (darkness) - wrong IP address """ class InverterData(): def __init__(self, data_array: list[str]) -> None: self._unknown0 = data_array[ArrayPosition.unknown0] self._unknown1 = data_array[ArrayPosition.unknown1] registry_id = data_array[ArrayPosition.registry_id] self._registry_id = registry_id self._registry_key = data_array[ArrayPosition.registry_key] self._hardware_version = data_array[ArrayPosition.hardware_version] self._software_version = data_array[ArrayPosition.software_version] date_and_time = data_array[ArrayPosition.date_and_time] self._communication_status = data_array[ArrayPosition.communication_status] self._unknown8 = data_array[ArrayPosition.unknown8] self._serial_number = data_array[ArrayPosition.serial_number] self._pac_watt : int = int(data_array[ArrayPosition.pac_watt]) val = data_array[ArrayPosition.energy_today_KWh] self._energy_today_KWh : float = float(self._patch(val)) self._status = data_array[ArrayPosition.status] self._unknown13 = data_array[ArrayPosition.unknown13] self._mac_address = f"{registry_id[0:2]}-{registry_id[2:4]}-{registry_id[4:6]}-{registry_id[6:8]}-{registry_id[8:10]}-{registry_id[10:12]}" self._datetime = datetime.strptime(date_and_time, '%H:%M %d/%m/%Y') @property def unknown0(self) -> str: return self._unknown0 @property def unknown1(self) -> str: return self._unknown1 @property def registry_id(self) -> str: return self._registry_id @property def registry_key(self) -> str: return self._registry_key @property def hardware_version(self) -> str: return self._hardware_version @property def software_version(self) -> str: return self._software_version @property def datetime(self) -> datetime: return self._datetime @property def communication_status(self) -> str: return self._communication_status @property def unknown8(self) -> str: return self._unknown8 @property def serial_number(self) -> str: return self._serial_number @property def pac_watt(self) -> int: return self._pac_watt @property def energy_today_KWh(self) -> float: return self._energy_today_KWh @property def status(self) -> str: return self._status @property def unknown13(self) -> str: return self._unknown13 @property def mac_address(self) -> str: return self._mac_address def _patch(self, val: str) -> str: """Fix the missing 0 if only one
decimal is given.""" if ( val[-2] == "." ): return val[0:-1] + "0" + val[-1:] return val class Inverter(): def __init__(self, ip_address: str, timeout: int = 5) -> None: self._ip_address : str = ip_address self._timeout : int = timeout self._mac_address : str = None self._serial_number : str = None self._local_data_url : str = f"http://{ip_address}/home.cgi" # ?sid=0 self._local_power_url : str = f"http://{ip_address}/inv_ctrl.cgi" # ?sid=0 @property def mac_address(self): return self._mac_address @property def serial_number(self): return self._serial_number async def async_connect(self) -> None: """Reads inverter related information from the url.""" try: async with httpx.AsyncClient() as client: data = await client.get(self._local_data_url, timeout=self._timeout) # data_array2 = data.content.split() result_string = data.content.decode(encoding="utf-8") data_array = result_string.split('\n') registry_id = data_array[ArrayPosition.registry_id] serial_number = data_array[ArrayPosition.serial_number] mac_address = f"{registry_id[0:2]}-{registry_id[2:4]}-{registry_id[4:6]}-{registry_id[6:8]}-{registry_id[8:10]}-{registry_id[10:12]}" except httpx.TimeoutException as ex: raise ZeversolarTimeout(f"Connection to Zeversolar inverter '{self._ip_address}' timed out.") from ex except Exception as ex: raise ZeversolarError(f"Generic error while connecting to Zeversolar inverter '{self._ip_address}'.") from ex self._mac_address = mac_address self._serial_number = serial_number async def async_get_data(self) -> InverterData: """Reads the actual data from the inverter.""" try: async with httpx.AsyncClient() as client: data = await client.get(self._local_data_url, timeout=self._timeout) result_string = data.content.decode(encoding="utf-8") data_array = result_string.split('\n') except httpx.TimeoutException as ex: raise ZeversolarTimeout(f"Connection to Zeversolar inverter '{self._ip_address}' timed out.") from ex except Exception as ex: raise ZeversolarError(f"Generic error while connecting to Zeversolar inverter '{self._ip_address}'.") from ex return InverterData(data_array) async def power_on(self) -> bool: """Power inverter on.""" return await self._change_power_state(0) async def power_off(self) -> bool: """Power inverter off.""" return await self._change_power_state(1) async def _change_power_state(self, mode : int) -> bool: """Power inverter on or off.""" try: async with httpx.AsyncClient() as client: my_response = await client.post(self._local_power_url, data={'sn': self._serial_number, 'mode': mode}, timeout=self._timeout) return my_response.status_code == 200 except httpx.TimeoutException as ex: raise ZeversolarTimeout(f"Connection to Zeversolar inverter '{self._ip_address}' timed out.") from ex except Exception as ex: raise ZeversolarError(f"Generic error while connecting to Zeversolar inverter '{self._ip_address}'.") from ex
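A minimal usage sketch for the classes above, assuming an inverter reachable on the local network; the IP address is a placeholder:

```python
# Hypothetical usage of the Inverter class above; 192.168.1.12 is a placeholder.
import asyncio

from zever_local.inverter import Inverter

async def main() -> None:
    inverter = Inverter("192.168.1.12")
    await inverter.async_connect()          # populates mac_address and serial_number
    data = await inverter.async_get_data()  # fetches and parses home.cgi
    print(inverter.serial_number, data.pac_watt, data.energy_today_KWh)

asyncio.run(main())
```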
zever-local
/zever_local-1.0.3-py3-none-any.whl/zever_local/inverter.py
inverter.py
# Zevercloud API Python API for the Zevercloud API **Note**: _This piece of software is not approved or endorsed by ZeverCloud. Nor do I endorse their products._ ## Installation You can install `zevercloud-api` using your favorite package manager. For example: ```shell pip install zevercloud-api ``` ## Credentials Three keys are needed to connect to the Zevercloud API: - `api_key` - `app_key` - `app_secret` Your `api_key` can be found on the Zevercloud site, under `Configuration > Plant Configuration > 5. Api Key`. The `app_key` and `app_secret` can be found under `Account Management > Security Settings`, but are only visible once approved by Zeversolar Support. Send an email to [email protected], for example, and ask them to make the `app_key` and `app_secret` visible to you. They typically do so within a day. ## Usage To see the last known status of your site, as well as some yield statistics: ```python from zevercloud import ZeverCloud zc = ZeverCloud(API_KEY, APP_KEY, APP_SECRET) print(zc.overview) ``` ```shell >> { "last_updated": datetime(2022, 2, 3, 13, 57, 26), "online": False, "power": 0, "site_id": 12345, "yield": { "today": 5.9, "month": 218.42, "total": 5800, "year": 1770, }, } ``` ### Historical power and yield Historical yield and power figures can also be obtained: ```python zc.get_output(date=date(2022, 8, 1)) ``` ```shell >> [ ... {"power": 1183, "timestamp": datetime(2022, 8, 1, 12, 0)}, {"power": 1240, "timestamp": datetime(2022, 8, 1, 12, 20)}, {"power": 1815, "timestamp": datetime(2022, 8, 1, 12, 40)}, ... ] ``` ```python zc.get_daily_output(month=date(2022, 8, 1)) ``` ```shell >> [ {"date": date(2022, 8, 1), "yield": 4.1}, {"date": date(2022, 8, 2), "yield": 5.2}, {"date": date(2022, 8, 3), "yield": 0.2}, ... ] ``` ```python zc.get_monthly_output(year=2022) ``` ```shell >> [ {"date": date(2022, 1, 1), "yield": 40.1}, {"date": date(2022, 2, 1), "yield": 52.1}, {"date": date(2022, 3, 1), "yield": 113}, {"date": date(2022, 4, 1), "yield": 8.11}, ... ] ``` ```python zc.get_yearly_output() ``` ```shell >> [ {"year": 2012, "yield": 4069}, {"year": 2013, "yield": 308}, ... ] ``` Power is always presented in W (Watt), and yield in kWh (kiloWatt-hour). Due to the internals of the Zevercloud API, all numbers may be rounded to two significant digits. ### Events Events (errors) can be listed: ```python zc.get_events(start_date=date(2022, 1, 1), end_date=date(2022, 8, 1)) ``` ```shell >> [ ZeverSolarEvent( event_time=datetime(2022, 1, 1, 12, 34, 56), inverter_id="ZS12345678", event_code=3, event_type=101, ) ] ``` The `ZeverSolarEvent` has a human-readable `event_description`. **Note**: the internal Zevercloud API can only return events for 7 days at a time. Using the `get_events`-method on a large date range will result in many API-calls being made, and may hence take a rather long time. ### Detailed logs More detailed logs can be retrieved with `get_details`. This endpoint requires the id of the monitor as input, which can be found on the ZeverCloud website. ```python zc.get_details(date=date(2022, 1, 1), psno="EAB1234C5678") ``` ```shell >> [ { 'ac_frequency': 50, 'ac_power': 30, 'ac_current_p1': 0, 'ac_current_p2': 0, 'ac_current_p3': 0, 'ac_voltage_p1': 236.8, 'ac_voltage_p2': 0, 'ac_voltage_p3': 0, 'inverter_id': 'ZS12345678', 'pv_current_1': 0, 'pv_current_2': 0, 'pv_current_3': 0, 'pv_voltage_1': 271.4, 'pv_voltage_2': 0, 'pv_voltage_3': 0, 'temperature': 26.7, 'timestamp': datetime.datetime(2022, 8, 1, 6, 49, 37), 'yield_today': 0.1, 'yield_total': 5615.2 }, ... ] ``` ## Releases - `0.2.1` Improve security by adding timestamp and nonce to requests - `0.2.0` Add `get_details` method - `0.1.0` First working version
zevercloud-api
/zevercloud-api-0.2.1.tar.gz/zevercloud-api-0.2.1/README.md
README.md
from base64 import b64encode from datetime import datetime, date, time, timedelta from hashlib import sha256 from hmac import new as hmac from time import time as timestamp from typing import Any, Dict, List, Union from uuid import uuid4 from requests import get from zevercloud.event import ZeverSolarEvent class ZeverCloud: """ Python wrapper for the ZeverCloud API Args: api_key (str): Your api key. Can be found on the Zevercloud site, under `Configuration > Plant Configuration > 5. Api Key`. app_key (str): Your app key. Can be found under `Account Management > Security Settings`. app_secret (str): Your app secret. Can be found under `Account Management > Security Settings`. Note that the app_key and app_secret are only visible once approved by Zeversolar Support. Send an email to [email protected], for example, and ask them to make the `app_key` and `app_secret` visible to you. """ def __init__(self, api_key: str, app_key: str, app_secret: str): self.api_key = api_key self.app_key = app_key self.app_secret = app_secret @property def overview(self) -> Dict[str, Any]: """ Retrieve an overview of the current status of the site. Returns a dictionary like: { "last_updated": datetime(2022, 2, 3, 13, 57, 26), "online": False, "power": 0, "site_id": 12345, "yield": { "today": 5.9, "month": 218.42, "total": 5800, "year": 1770, }, } """ result = self._get(f"/getPlantOverview?key={self.api_key}") return { "last_updated": datetime.strptime(result["ludt"], "%Y-%m-%d %H:%M:%S"), "online": result["status"] == 1, "power": self._apply_unit(**result["Power"]), "site_id": result["sid"], "yield": { "today": self._apply_unit(**result["E-Today"]), "month": self._apply_unit(**result["E-Month"]), "year": self._apply_unit(**result["E-Year"]), "total": self._apply_unit(**result["E-Total"]), }, } def get_events(self, start_date: date, end_date: date) -> List[ZeverSolarEvent]: """ Get a list of events (errors) that occurred between start_date and end_date. Note that the API can only return events for 7 days at a time. Using this method on a large date range will result in many API-calls being made, and may hence take a rather long time. Args: start_date (date): The start date (inclusive) end_date (date): The end date (inclusive) """ result = [] while (end_date - start_date).days > 6: result += self._get_events(start_date, start_date + timedelta(days=6)) start_date = start_date + timedelta(days=7) result += self._get_events(start_date, end_date) return result def get_output(self, date: date) -> List[Dict[str, Union[int, datetime]]]: """ Get the power output of the site at 20-minute intervals on the provided date. Returns a list of dictionaries of the form: [ ... {"power": 1183, "timestamp": datetime(2022, 8, 1, 12, 0)}, {"power": 1240, "timestamp": datetime(2022, 8, 1, 12, 20)}, {"power": 1815, "timestamp": datetime(2022, 8, 1, 12, 40)}, ... ] The unit of the power field is Watt. Args: date (date): The date for which to request the power data. """ response = self._get(f"/getPlantOutput?date={date.strftime('%Y-%m-%d')}&key={self.api_key}&period=bydays") return [ dict( timestamp=datetime.combine( date=date, time=time(hour=int(entry["time"][:2]), minute=int(entry["time"][-2:])), ), power=self._apply_unit(value=float(entry["value"]), unit=response["dataunit"]), ) for entry in response["data"] ] def get_details(self, date: date, psno: str) -> List[Dict[str, Union[datetime, int, float, str]]]: """ Get monitor details at 10-minute intervals on the provided date.
Returns a list of dictionaries of the form: [ { 'ac_frequency': 50, 'ac_power': 30, 'ac_current_p1': 0, 'ac_current_p2': 0, 'ac_current_p3': 0, 'ac_voltage_p1': 236.8, 'ac_voltage_p2': 0, 'ac_voltage_p3': 0, 'inverter_id': 'ZS12345678', 'pv_current_1': 0, 'pv_current_2': 0, 'pv_current_3': 0, 'pv_voltage_1': 271.4, 'pv_voltage_2': 0, 'pv_voltage_3': 0, 'temperature': 26.7, 'timestamp': datetime.datetime(2022, 8, 1, 6, 49, 37), 'yield_today': 0.1, 'yield_total': 5615.2 } ] Args: date (date): The date for which to request the data. psno (str): The ID of the monitor. E.g. EAB1234C5678. """ response = self._get(f"/getpmudata?apikey={self.api_key}&date={date.strftime('%Y-%m-%d')}&psno={psno}") return [ dict( ac_frequency=entry.get("fac"), ac_power=entry.get("pac"), ac_current_p1=entry.get("iac1"), ac_current_p2=entry.get("iac2"), ac_current_p3=entry.get("iac3"), ac_voltage_p1=entry.get("vac1"), ac_voltage_p2=entry.get("vac2"), ac_voltage_p3=entry.get("vac3"), inverter_id=entry.get("isno"), pv_current_1=entry.get("ipv1"), pv_current_2=entry.get("ipv2"), pv_current_3=entry.get("ipv3"), pv_voltage_1=entry.get("vpv1"), pv_voltage_2=entry.get("vpv2"), pv_voltage_3=entry.get("vpv3"), temperature=entry.get("tempval"), timestamp=datetime.strptime(entry["recvdate"], "%Y-%m-%d %H:%M:%S"), yield_today=entry.get("e_today"), yield_total=entry.get("e_total"), ) for entry in response["data"] ] def get_daily_output(self, month: date) -> List[Dict[str, Any]]: """ Get the daily yield of the site in the given month. Returns a list of dictionaries of the form: [ {"date": date(2022, 8, 1), "yield": 4.1}, {"date": date(2022, 8, 2), "yield": 5.2}, {"date": date(2022, 8, 3), "yield": 0.2}, ... ] The unit of the yield field is kWh. Args: month (date): The month for which to request yield data. Accepts a datetime.date object, of which the day field is ignored. """ response = self._get(f"/getPlantOutput?date={month.strftime('%Y-%m')}&key={self.api_key}&period=bymonth") return [ { "date": datetime.strptime(entry["time"], "%Y-%m-%d").date(), "yield": self._apply_unit(value=float(entry["value"]), unit=response["dataunit"]), } for entry in response["data"] ] def get_monthly_output(self, year: int) -> List[Dict[str, Any]]: """ Get the monthly yield of the site in the given year. Returns a list of dictionaries of the form: [ {"date": date(2022, 1, 1), "yield": 40.1}, {"date": date(2022, 2, 1), "yield": 52.1}, {"date": date(2022, 3, 1), "yield": 113}, {"date": date(2022, 4, 1), "yield": 8.11}, ... ] The unit of the yield field is kWh. Args: year (int): The year for which to request yield data. """ if not isinstance(year, int) or len(str(year)) != 4: raise ValueError(f"Year must be a four-digit integer. Got {year}.") response = self._get(f"/getPlantOutput?date={year}&key={self.api_key}&period=byyear") return [ { "date": datetime.strptime(entry["time"] + "-01", "%Y-%m-%d").date(), "yield": self._apply_unit(value=float(entry["value"]), unit=response["dataunit"]), } for entry in response["data"] ] def get_yearly_output(self) -> List[Dict[str, Any]]: """ Get the yearly yield of the site over its entire existence. Returns a list of dictionaries of the form: [ {"year": 2012, "yield": 4069}, {"year": 2013, "yield": 308}, ... ] The unit of the yield field is kWh.
""" response = self._get(f"/getPlantOutput?key={self.api_key}&period=bytotal") return [ { "year": int(entry["time"]), "yield": self._apply_unit(value=float(entry["value"]), unit=response["dataunit"]), } for entry in response["data"] ] def _get_events(self, start_date: date, end_date: date) -> List[ZeverSolarEvent]: """ Get a list of events that occurred between start_date and end_date. The start and end date may not be more than six days apart. """ if (end_date - start_date).days > 6: raise ValueError("Can not request more than 7 days of events at once.") result = self._get( f"/getPlantEvent?edt={end_date.strftime('%Y-%m-%d')}" f"&key={self.api_key}&sdt={start_date.strftime('%Y-%m-%d')}" ) if result.get("code") == 0: return [] # No events in time range. return [ ZeverSolarEvent( event_time=datetime.strptime(entry["eventTime"], "%Y-%m-%d %H:%M:%S"), event_type=int(entry["eventType"]), event_code=int(entry["eventCode"]), inverter_id=entry["ssno"], ) for entry in result["data"] ] def _get(self, url: str) -> Dict[str, Any]: # TODO: rate limiting headers_to_sign = { "X-Ca-Key": self.app_key, "X-Ca-Nonce": str(uuid4()), "X-Ca-Timestamp": str(int(timestamp() * 1000)), } headers_string = "".join([f"{key}:{headers_to_sign[key]}\n" for key in sorted(headers_to_sign.keys())]) payload = f"GET\napplication/json\n\n\n\n{headers_string}{url}" signature = hmac( key=self.app_secret.encode("UTF-8"), digestmod=sha256, msg=payload.encode("UTF-8"), ).digest() headers = { "X-Ca-Signature-Headers": ",".join(headers_to_sign.keys()), "X-Ca-Signature": b64encode(signature).decode("UTF-8"), "Accept": "application/json", **headers_to_sign, } response = get(f"http://api.general.zevercloud.cn{url}", headers=headers) if response.status_code == 400: # TODO: error handling print(response.headers) response.raise_for_status() return response.json() @staticmethod def _apply_unit(value: float, unit: str) -> Union[float, int]: """ Given a unit and a value, convert the value to a standardized unit. Values are converted to: - W (Watt) for power (i.e. the incoming unit is W, kW, MW) - kWh (kiloWatt-hour) for yield (i.e. the incoming unit is Wh, kWh, MWh) As the Zevercloud API (sometimes) uses incorrect capitalisation (KWh instead of kWh), capitalisation is ignored. """ unit = unit.lower() if unit.startswith("w"): result = value elif unit.startswith("k"): result = 1000 * value elif unit.startswith("m"): result = 1_000_000 * value else: raise ValueError(f"Unrecognized unit: {unit}") if unit.endswith("h"): # We convert yield to kWh return result / 1000 else: return int(round(result)) @property def inverters(self) -> List[str]: """Get a list of inverter ids associated to your site.""" result = self._get(f"/getInverterOverview?key={self.api_key}") return [entry["isno"] for entry in result["data"]]
zevercloud-api
/zevercloud-api-0.2.1.tar.gz/zevercloud-api-0.2.1/src/zevercloud/cloud.py
cloud.py
python-zeversolar-api ===================== Python API for interacting with `Zeversolar <https://www.zevercloud.com>`_. This module is not official, developed, supported or endorsed by Zeversolar. Installation ------------ The module is available from the `Python Package Index <https://pypi.python.org/pypi>`_. .. code:: bash $ pip3 install zeversolar_api Usage ----- The file ``example.py`` contains an example of how to use this module. Development ----------- For development, it is recommended to use a ``venv``. .. code:: bash $ python3.6 -m venv . $ source bin/activate $ python3 setup.py develop License ------- ``python-zeversolar-api`` is licensed under MIT, for more details check LICENSE.
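A hypothetical usage sketch based on the ``Zeversolar`` class defined in ``zeversolar_api/__init__.py`` below; the real ``example.py`` may differ:

.. code:: python

    import asyncio
    import aiohttp
    from zeversolar_api import Zeversolar

    async def main(loop):
        async with aiohttp.ClientSession() as session:
            api = Zeversolar(loop, session)
            await api.get_data()   # fetches and stores the raw response
            print(api.data)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))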
zeversolar-api
/zeversolar_api-0.1.0.tar.gz/zeversolar_api-0.1.0/README.rst
README.rst
import asyncio import logging import aiohttp import async_timeout from . import exceptions _LOGGER = logging.getLogger(__name__) _RESOURCE = 'https://{host}/api/v{version}/getPlantOverview?key=' class Zeversolar(object): """A class for handling the data retrieval.""" def __init__(self, loop, session, host='www.zevercloud.com', version=1): """Initialize the connection.""" self._loop = loop self._session = session self.url = _RESOURCE.format(host=host, version=version) self.data = None self.values = None self.plugins = None async def get_data(self): """Retrieve the data.""" url = '{}/{}'.format(self.url, 'all') try: with async_timeout.timeout(5, loop=self._loop): response = await self._session.get(url) _LOGGER.debug("Response from Zeversolar API: %s", response.status) self.data = await response.json() _LOGGER.debug(self.data) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load data from Zeversolar API") raise exceptions.ZeversolarApiConnectionError() async def get_metrics(self, element): """Get all the metrics for a monitored element.""" await self.get_data() await self.get_plugins() if element in self.plugins: self.values = self.data[element] else: raise exceptions.ZeversolarApiError("Element data not available") async def get_plugins(self): """Retrieve the available plugins.""" url = '{}/{}'.format(self.url, 'pluginslist') try: with async_timeout.timeout(5, loop=self._loop): response = await self._session.get(url) _LOGGER.debug("Response from Zeversolar API: %s", response.status) self.plugins = await response.json() _LOGGER.debug(self.plugins) except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Can not load plugins from Zeversolar API") raise exceptions.ZeversolarApiConnectionError()
zeversolar-api
/zeversolar_api-0.1.0.tar.gz/zeversolar_api-0.1.0/zeversolar_api/__init__.py
__init__.py
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [2022] [kvanzuijlen] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
zeversolar
/zeversolar-0.3.1-py3-none-any.whl/zeversolar-0.3.1.dist-info/LICENSE.md
LICENSE.md
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) ![PyPI](https://img.shields.io/pypi/v/zeversolarlocal) [![codecov](https://codecov.io/gh/sander76/zeversolarlocal/branch/master/graph/badge.svg?token=FED6T168H0)](https://codecov.io/gh/sander76/zeversolarlocal) # zeversolarlocal Access solar energy data from your local Zeversolar inverter. ## Principle This library accesses the `home.cgi` endpoint, which returns a bytes object that is parsed into the current power generation and the daily generated energy. The inverter is powered by the solar energy generated by the connected solar panels, which means that some errors can mean different things depending on the circumstances. If the inverter cannot be reached, a `ZeverTimeout` is raised. This could mean: 1. The ip-address of the inverter is wrong. 2. The inverter is switched off (no solar power), which can be considered expected behavior. If the returned data cannot be parsed properly, a `ZeverError` is raised. This could mean: 1. The returned data is wrong and there is indeed an error. 2. Some other data (whose meaning is currently unknown) was returned that cannot be parsed, while everything else is fine. ## Installation `pip install zeversolarlocal` ## Usage ```python import asyncio import zeversolarlocal loop = asyncio.get_event_loop() def get(): address = "192.168.1.12" # ip address of your zeversolar inverter. url = zeversolarlocal.default_url(address) solar_data = loop.run_until_complete(zeversolarlocal.solardata(url)) print(solar_data) ``` ## CLI From the command line, use: `python -m zeversolarlocal 192.168.1.12` # Changelog ## 1.0.0 - First release
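A sketch of the error handling implied above; it assumes `ZeverTimeout` is importable from the package root, which may differ from the actual layout:

```python
# Hedged sketch: the exception's import location is an assumption.
import asyncio
import zeversolarlocal

async def poll(url):
    try:
        return await zeversolarlocal.solardata(url)
    except zeversolarlocal.ZeverTimeout:
        # Expected at night: the inverter powers down without solar input.
        return None

url = zeversolarlocal.default_url("192.168.1.12")  # placeholder address
print(asyncio.get_event_loop().run_until_complete(poll(url)))
```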
zeversolarlocal
/zeversolarlocal-1.1.0.tar.gz/zeversolarlocal-1.1.0/README.md
README.md
import requests class zevvle(): def __init__(self, key, url="https://api.zevvle.com"): """ Initializes the SDK. :param key: Your Zevvle API key. :param url optional: The Zevvle API URL. """ if(not key): raise Exception("Missing API key.") self._url = url self._header = {"Authorization": "Bearer {}".format(key)} def _do_request(self, url, parameters={}): """ Makes a request. :param url: The full URL of the request. :param parameters optional: Dict of parameters for the request. :returns: JSON response. """ if(not url): raise Exception("Called without URL") r = requests.get(self._url + url, headers=self._header, params=parameters) if(r.status_code == 200): return r.json() raise Exception("Request failed with error {}".format(str(r.status_code))) def get_account(self, account_id): """ Looks up Zevvle account. :param account_id: ID of the Zevvle account to look up. :returns: Zevvle account details. """ if(not account_id): raise Exception("Missing account_id parameter") return self._do_request("/accounts/{}".format(account_id)) def get_user(self, user_id): """ Looks up Zevvle user. :param user_id: ID of the Zevvle user to look up. :returns: Zevvle user details. """ if(not user_id): raise Exception("Missing user_id parameter") return self._do_request("/users/{}".format(user_id)) def get_sim(self, sim_id): """ Looks up Zevvle SIM card. :param sim_id: ID of the Zevvle SIM card to look up. :returns: Zevvle SIM card details. """ if(not sim_id): raise Exception("Missing sim_id parameter") return self._do_request("/sim_cards/{}".format(sim_id)) def list_sim_cards(self): """ Lists all SIM cards linked to the Zevvle API key. :returns: SIM cards for the API key in use. """ return self._do_request("/sim_cards") def list_call_records(self, sim_id, type="", limit="", before=None, after=None): """ Lists call records for a given Zevvle SIM ID, according to filtering. :param sim_id: ID of the Zevvle SIM card to get records for. :param type optional: Call record type (data, voice, sms, mms) to filter on. :param limit optional: How many records to limit the results to. :param before: Limit results to records before a given datetime. :param after: Limit results to records after a given datetime. :returns: Call records for the given query. """ parameters = {} if(not sim_id): raise Exception("Missing sim_id parameter") else: parameters["sim_card_id"] = sim_id if(type and type not in ["data", "voice", "sms", "mms"]): raise Exception("Invalid call record type (data, voice, sms, mms) only") elif(type): # Valid type supplied parameters["type"] = type if(limit): parameters["limit"] = limit if(before): parameters['before'] = before if(after): parameters['after'] = after return self._do_request("/call_records/", parameters) def get_call_record(self, call_record_id): """ Looks up Zevvle call record. :param call_record_id: ID of the Zevvle call record to look up. :returns: Zevvle call record details. """ if(not call_record_id): raise Exception("Missing call_record_id parameter") return self._do_request("/call_records/{}".format(call_record_id))
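A minimal usage sketch for the SDK above; the API key and SIM id are placeholders, and the import path is assumed from the file layout (`zevvle/zevvle.py`):

```python
from zevvle.zevvle import zevvle

zev = zevvle("your-api-key")  # placeholder key

# List SIM cards linked to the key, then fetch recent data records for one.
for sim in zev.list_sim_cards():
    print(sim)

records = zev.list_call_records("sim_abc123", type="data", limit=10)
```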
zevvle-python-sdk
/zevvle_python_sdk-0.0.3-py3-none-any.whl/zevvle/zevvle.py
zevvle.py
============================================= Zeyrek: Morphological Analyzer and Lemmatizer ============================================= .. image:: https://img.shields.io/pypi/v/zeyrek.svg :target: https://pypi.python.org/pypi/zeyrek .. image:: https://readthedocs.org/projects/zeyrek/badge/?version=latest :target: https://zeyrek.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status .. image:: https://github.com/obulat/zeyrek/workflows/build/badge.svg?branch=master :alt: build Zeyrek is a partial port of Zemberek library to Python for lemmatizing and analyzing Turkish language words. It is in alpha stage, and the API will probably change. * Free software: MIT license * Documentation: https://zeyrek.readthedocs.io. Basic Usage ~~~~~~~~~~~ To use Zeyrek, first create an instance of MorphAnalyzer class:: >>> import zeyrek >>> analyzer = zeyrek.MorphAnalyzer() Then, you can call its `analyze` method on words or texts to get all possible analyses:: >>> print(analyzer.analyze('benim')) Parse(word='benim', lemma='ben', pos='Noun', morphemes=['Noun', 'A3sg', 'P1sg'], formatted='[ben:Noun] ben:Noun+A3sg+im:P1sg') Parse(word='benim', lemma='ben', pos='Pron', morphemes=['Pron', 'A1sg', 'Gen'], formatted='[ben:Pron,Pers] ben:Pron+A1sg+im:Gen') Parse(word='benim', lemma='ben', pos='Verb', morphemes=['Noun', 'A3sg', 'Zero', 'Verb', 'Pres', 'A1sg'], formatted='[ben:Noun] ben:Noun+A3sg|Zero→Verb+Pres+im:A1sg') Parse(word='benim', lemma='ben', pos='Verb', morphemes=['Pron', 'A1sg', 'Zero', 'Verb', 'Pres', 'A1sg'], formatted='[ben:Pron,Pers] ben:Pron+A1sg|Zero→Verb+Pres+im:A1sg') If you only need the base form of words, or lemmas, you can call `lemmatize`. It returns a list of tuples, with word itself and a list of possible lemmas:: >>> print(analyzer.lemmatize('benim')) [('benim', ['ben'])] Credits ------- This package is a Python port of part of the Zemberek_ package by `Ahmet A. Akın`_ .. _Zemberek: https://github.com/ahmetaa/zemberek-nlp .. _Ahmet A. Akın: https://github.com/ahmetaa/ This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. .. _Cookiecutter: https://github.com/audreyr/cookiecutter .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
zeyrek
/zeyrek-0.1.3.tar.gz/zeyrek-0.1.3/README.rst
README.rst
.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/obulat/trLemmer/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ Turkish Lemmatizer could always use more documentation, whether as part of the official Turkish Lemmatizer docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/obulat/trLemmer/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `trLemmer` for local development. 1. Fork the `trLemmer` repo on GitHub. 2. Clone your fork locally:: $ git clone [email protected]:your_name_here/trLemmer.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv trLemmer $ cd trLemmer/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 trLemmer tests $ python setup.py test or py.test $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check https://travis-ci.org/obulat/trLemmer/pull_requests and make sure that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: $ py.test tests.test_trLemmer Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bumpversion patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass.
zeyrek
/zeyrek-0.1.3.tar.gz/zeyrek-0.1.3/CONTRIBUTING.rst
CONTRIBUTING.rst
Zeyrek ====== Zeyrek is a python morphological analyzer and lemmatizer for Turkish. It is a partial port of the `Zemberek-NLP Tools (Morphology) <https://github.com/ahmetaa/zemberek-nlp>`_ Zeyrek can perform morphological analysis of Turkish text, returning all possible parses for each word, and lemmatize words, returning all possible base (non-inflected) forms of words. Usage ===== To use Zeyrek, first create an instance of MorphAnalyzer class:: >>> import zeyrek >>> analyzer = zeyrek.MorphAnalyzer() Then, you can call its `analyze` method on words or texts to get all possible analyses:: >>> for parse in analyzer.analyze('benim')[0]: ... print(parse) Parse(word='benim', lemma='ben', pos='Noun', morphemes=['Noun', 'A3sg', 'P1sg'], formatted='[ben:Noun] ben:Noun+A3sg+im:P1sg') Parse(word='benim', lemma='ben', pos='Pron', morphemes=['Pron', 'A1sg', 'Gen'], formatted='[ben:Pron,Pers] ben:Pron+A1sg+im:Gen') Parse(word='benim', lemma='ben', pos='Verb', morphemes=['Noun', 'A3sg', 'Zero', 'Verb', 'Pres', 'A1sg'], formatted='[ben:Noun] ben:Noun+A3sg|Zero→Verb+Pres+im:A1sg') Parse(word='benim', lemma='ben', pos='Verb', morphemes=['Pron', 'A1sg', 'Zero', 'Verb', 'Pres', 'A1sg'], formatted='[ben:Pron,Pers] ben:Pron+A1sg|Zero→Verb+Pres+im:A1sg') If you only need the base form of words, or lemmas, you can call `lemmatize`. It returns a list of tuples, with word itself and a list of possible lemmas:: >>> print(analyzer.lemmatize('benim')) [('benim', ['ben'])] Installation ============ To install Zeyrek, run this command in your terminal: .. code-block:: console $ pip install zeyrek User Guide ========== Zeyrek's morphological analyzer returns instances of Parse object (based on pymorphy2's Parse), which is a wrapper of namedtuple class. Parse object fields include: - `word`: the word itself - `lemma`: base form of the word, as found in a dictionary - `pos`: part of speech of the word. Note: Turkish is an agglutinative language, which makes it quite different from widespread European languages. A word can usually be much longer, made of Inflection Groups (IG), which can correspond to words in other languages. Each of these IGs can have its own part of speech, and the part of speech of the word as a whole is determined by the part of speech of the last IG. - `morphemes`: sequence of morphemes in the word, a list of strings - abbreviations of English names of morphemes. - `formatted`: a human-readable string representation of the analysis. There are several kinds of possible formats. Default formatter shows the dictionary item and its part of speech, and morphemes (with their surfaces, if available), divided into inflectional groups by `|` character. License ======= Licensed under MIT License. Zemberek, from parts of which Zeyrek was ported, is under Apache License, Version 2.0. Disclaimer ========== This project is in alpha stage, so the API can change.
zeyrek
/zeyrek-0.1.3.tar.gz/zeyrek-0.1.3/docs/index.rst
index.rst
from zeys.api.special_keys_builder import SpecialKeysBuilder import io import sys import time import select import termios import tty DELETE_CHAR='\x7f' ENTER_CHAR='\n' ESC_CHAR='\x1b' TAB_CHAR='\t' DEFAULT_SPEED = 0.03 class PosixDetector: def __init__(self, print_group=False, special_keys=None, speed=None): self._print_group = print_group self._special_keys = special_keys self._speed = speed if(special_keys is None): self._special_keys = self.get_default_special_keys_builder().build() if(speed is None): self._speed = DEFAULT_SPEED # Map special key outputs to their input sequence of ordinal key numbers def get_default_special_keys_builder(self): special_keys_builder = SpecialKeysBuilder() special_keys_builder.add_mapping("enter", [ ord(ENTER_CHAR) ]) special_keys_builder.add_mapping("esc", [ ord(ESC_CHAR) ]) special_keys_builder.add_mapping("tab", [ ord(TAB_CHAR) ]) special_keys_builder.add_mapping("delete", [ ord(DELETE_CHAR) ]) special_keys_builder.add_mapping("arrow-up", [ 27, 91, 65 ]) special_keys_builder.add_mapping("arrow-down", [ 27, 91, 66 ]) special_keys_builder.add_mapping("arrow-right", [ 27, 91, 67 ]) special_keys_builder.add_mapping("arrow-left", [ 27, 91, 68 ]) return special_keys_builder def _has_data(self): return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []) # Low level capture function def _capture(self): settings = termios.tcgetattr(sys.stdin) tty.setcbreak(sys.stdin.fileno()) try: with io.open(sys.stdin.fileno(), 'rb', buffering=0, closefd=False) as std: data = [] while True: if(self._has_data()): input_obj = std.read(1) char = input_obj[0] data.append(int(char)) else: time.sleep(self._speed) if(len(data) > 0): yield data data = [] finally: termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings) def run(self): capture_generator = self._capture() for capture_group in capture_generator: skip = 0 for index in range(0, len(capture_group)): if(skip > 0): skip = skip - 1 else: item = capture_group[index] #print("ordinal: " + str(item)) special_key, sequence_length = self._special_keys.check_special_keys(index, capture_group) if(special_key is not None): skip = sequence_length - 1 yield special_key else: yield chr(item)
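A usage sketch for the detector above; it assumes a POSIX terminal and the import path implied by the file layout (`zeys/api/posix_detector.py`):

```python
# Echo decoded keys until Esc is pressed; run() yields special key names
# (e.g. "esc", "arrow-up") or plain characters.
from zeys.api.posix_detector import PosixDetector

detector = PosixDetector()
for key in detector.run():
    print(key)
    if key == "esc":
        break
```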
zeys
/api/posix_detector.py
posix_detector.py
import io
import sys
import time
import msvcrt

RETURN_CHAR = '\r'
# NEWLINE_CHAR = '\n'
ESC_CHAR = '\x1b'
TAB_CHAR = '\t'
DEFAULT_SPEED = 0.03


class NtDetector:
    def __init__(self, print_group=False, special_key_map=None, speed=None):
        self._print_group = print_group
        if special_key_map is None:
            special_key_map = self.get_default_special_key_map()
        if speed is None:
            speed = DEFAULT_SPEED
        # Special key text -> list of ordinals to match against
        self._special_key_map = special_key_map
        # First ordinal of a special key sequence -> list of special keys whose
        # sequence starts with that ordinal. The list is ordered so that the key
        # with the longest ordinal sequence is tried first.
        self._lookup_key_map = self._get_special_key_index(special_key_map)
        self._speed = speed

    # Map special key outputs to their input sequence of ordinal key numbers.
    # Note: a dict holds one sequence per name, so the original's repeated
    # "delete" assignments ([8], [0, 83], [224, 83]) overwrote one another and
    # only the last, [224, 83], was ever matched; that is the one kept here.
    def get_default_special_key_map(self):
        char_map = {}
        char_map["enter"] = [ord(RETURN_CHAR)]
        char_map["esc"] = [ord(ESC_CHAR)]
        char_map["tab"] = [ord(TAB_CHAR)]
        char_map["delete"] = [224, 83]
        char_map["arrow-up"] = [224, 72]
        char_map["arrow-down"] = [224, 80]
        char_map["arrow-right"] = [224, 77]
        char_map["arrow-left"] = [224, 75]
        return char_map

    # Build an index to quickly determine if ordinal input could start a special key sequence
    def _get_special_key_index(self, special_key_map):
        quick_lookup = {}
        for name in special_key_map:
            ordinal = special_key_map[name][0]
            if ordinal not in quick_lookup:
                quick_lookup[ordinal] = []
            quick_lookup[ordinal].append(name)

        def custom_sort(e):
            return len(self._special_key_map[e])

        for ordinal in quick_lookup:
            options = quick_lookup[ordinal]
            options.sort(key=custom_sort, reverse=True)
        return quick_lookup

    def _has_data(self):
        return msvcrt.kbhit()

    # Low-level capture function: yields one group of ordinals per burst of console input
    def _capture(self):
        with io.open(sys.stdin.fileno(), 'rb', buffering=0, closefd=False) as std:
            data = []
            while True:
                if self._has_data():
                    input_obj = msvcrt.getch()
                    char = input_obj[0]
                    data.append(char)
                else:
                    time.sleep(self._speed)
                    if len(data) > 0:
                        yield data
                        data = []

    def _check_special_keys(self, index, capture_group):
        item = capture_group[index]
        special_keys = self._lookup_key_map.get(item, [])
        for special_key in special_keys:
            sequence = self._special_key_map[special_key]
            if index < len(capture_group) - (len(sequence) - 1):
                match = True
                for sequence_index in range(1, len(sequence)):
                    if capture_group[index + sequence_index] != sequence[sequence_index]:
                        match = False
                        break
                if match:
                    return special_key
        return None

    def run(self):
        capture_generator = self._capture()
        for capture_group in capture_generator:
            if self._print_group:
                print("stdin input sequence: " + str(capture_group))
            skip = 0
            for index in range(0, len(capture_group)):
                if skip > 0:
                    skip = skip - 1
                else:
                    item = capture_group[index]
                    # print("ordinal: " + str(item))
                    special_key = self._check_special_keys(index, capture_group)
                    if special_key is not None:
                        skip = len(self._special_key_map[special_key]) - 1
                        yield special_key
                    else:
                        yield chr(item)
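NtDetector and PosixDetector expose the same run() generator, so a caller can dispatch on the platform. A small sketch (the dispatch itself is illustrative, not part of the zeys package):

    import os

    # Pick the console key detector that matches the current platform.
    if os.name == 'nt':
        from zeys.api.nt_detector import NtDetector as Detector
    else:
        from zeys.api.posix_detector import PosixDetector as Detector

    for key in Detector().run():
        if key == 'esc':
            break
        print(key)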
zeys
/api/nt_detector.py
nt_detector.py
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities import requests desired_capabilities = DesiredCapabilities.CHROME desired_capabilities['pageLoadStrategy']='none' def main(): headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Encoding':'gzip, deflate, br', 'Accept-Language':'zh-CN,zh;q=0.9', 'Cache-Control':'max-age=0', 'Connection':'keep-alive', 'Host':'tieba.baidu.com', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36' } selenium_cookies=[ {'domain': '.tieba.baidu.com', 'expiry': 1621043898, 'httpOnly': True, 'name': 'STOKEN', 'path': '/', 'secure': False, 'value': '4dd187b74496abb206bcadec7b194361e316aec330f6d0f9ed2f026a5c4419a6'}, {'domain': '.baidu.com', 'expiry': 1877651897, 'httpOnly': True, 'name': 'BDUSS_BFESS', 'path': '/', 'sameSite': 'None', 'secure': True, 'value': 'MySjBDUGRmTX5oWlZxOTJzS0pTZDNNY1pwcUdaeTNlYkhIVGJyN0dKNjVLcDlnRVFBQUFBJCQAAAAAAAAAAAEAAADrkk43tfG~zMLWwKowAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALmdd2C5nXdgND'}, {'domain': '.tieba.baidu.com', 'httpOnly': False, 'name': 'Hm_lpvt_98b9d8c2fd6608d564bf2ac2ae642948', 'path': '/', 'secure': False, 'value': '1618451888'}, {'domain': '.tieba.baidu.com', 'expiry': 1649987887, 'httpOnly': False, 'name': 'Hm_lvt_98b9d8c2fd6608d564bf2ac2ae642948', 'path': '/', 'secure': False, 'value': '1618451888'}, {'domain': '.baidu.com', 'expiry': 1877651897, 'httpOnly': True, 'name': 'BDUSS', 'path': '/', 'secure': False, 'value': 'MySjBDUGRmTX5oWlZxOTJzS0pTZDNNY1pwcUdaeTNlYkhIVGJyN0dKNjVLcDlnRVFBQUFBJCQAAAAAAAAAAAEAAADrkk43tfG~zMLWwKowAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALmdd2C5nXdgND'}, {'domain': '.baidu.com', 'expiry': 1649987886, 'httpOnly': False, 'name': 'BAIDUID_BFESS', 'path': '/', 'sameSite': 'None', 'secure': True, 'value': 'B58886C02FFA16885AC661DCD59D8B38:FG=1'}, {'domain': '.baidu.com', 'expiry': 1649987886, 'httpOnly': False, 'name': 'BAIDUID', 'path': '/', 'secure': False, 'value': 'B58886C02FFA16885AC661DCD59D8B38:FG=1'} ] requests_cookies={} for cookie in selenium_cookies: requests_cookies[cookie['name']] = cookie['value'] r= requests.get('https://tieba.baidu.com/', headers=headers, cookies=requests_cookies) print(r.text)
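The script above flattens Selenium's cookie dicts into a bare name -> value mapping, which discards domain and path scoping. A sketch of a variant that keeps those attributes via requests' standard RequestsCookieJar (the helper name is illustrative):

    import requests

    def jar_from_selenium(selenium_cookies):
        # Preserve domain/path scoping instead of flattening to name -> value.
        jar = requests.cookies.RequestsCookieJar()
        for c in selenium_cookies:
            jar.set(c['name'], c['value'], domain=c.get('domain'), path=c.get('path', '/'))
        return jar

    # session = requests.Session()
    # session.cookies = jar_from_selenium(selenium_cookies)
    # r = session.get('https://tieba.baidu.com/', headers=headers)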
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider7_Selenium1.py
SampleSprider7_Selenium1.py
from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities import time from selenium.webdriver.support.ui import WebDriverWait desired_capabilities = DesiredCapabilities.CHROME desired_capabilities['pageLoadStrategy']='none' class page_source_exisits(object): def __call__(self,driver): try: return driver.page_source except BaseException: return False class add_cookies(object): def __call__(self,driver): try: for cookie in selenium_cookies: driver.add_cookie(cookie) return True except BaseException: return False def main(): headers={ 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Encoding':'gzip, deflate, br', 'Accept-Language':'zh-CN,zh;q=0.9', 'Cache-Control':'max-age=0', 'Connection':'keep-alive', 'Host':'tieba.baidu.com', 'Upgrade-Insecure-Requests':'1', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36' } option=webdriver.ChromeOptions() for header in headers.items(): option.add_argument('{}="{}"'.format(header.__getitem__(0),header.__getitem__(1))) driver=webdriver.Chrome('D://谷歌驱动/chromedriver.exe',options=option) driver.get('https://tieba.baidu.com/') selenium_cookies=[ {'domain': '.tieba.baidu.com', 'expiry': 1621043898, 'httpOnly': True, 'name': 'STOKEN', 'path': '/', 'secure': False, 'value': '4dd187b74496abb206bcadec7b194361e316aec330f6d0f9ed2f026a5c4419a6'}, {'domain': '.baidu.com', 'expiry': 1877651897, 'httpOnly': True, 'name': 'BDUSS_BFESS', 'path': '/', 'sameSite': 'None', 'secure': True, 'value': 'MySjBDUGRmTX5oWlZxOTJzS0pTZDNNY1pwcUdaeTNlYkhIVGJyN0dKNjVLcDlnRVFBQUFBJCQAAAAAAAAAAAEAAADrkk43tfG~zMLWwKowAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALmdd2C5nXdgND'}, {'domain': '.tieba.baidu.com', 'httpOnly': False, 'name': 'Hm_lpvt_98b9d8c2fd6608d564bf2ac2ae642948', 'path': '/', 'secure': False, 'value': '1618451888'}, {'domain': '.tieba.baidu.com', 'expiry': 1649987887, 'httpOnly': False, 'name': 'Hm_lvt_98b9d8c2fd6608d564bf2ac2ae642948', 'path': '/', 'secure': False, 'value': '1618451888'}, {'domain': '.baidu.com', 'expiry': 1877651897, 'httpOnly': True, 'name': 'BDUSS', 'path': '/', 'secure': False, 'value': 'MySjBDUGRmTX5oWlZxOTJzS0pTZDNNY1pwcUdaeTNlYkhIVGJyN0dKNjVLcDlnRVFBQUFBJCQAAAAAAAAAAAEAAADrkk43tfG~zMLWwKowAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALmdd2C5nXdgND'}, {'domain': '.baidu.com', 'expiry': 1649987886, 'httpOnly': False, 'name': 'BAIDUID_BFESS', 'path': '/', 'sameSite': 'None', 'secure': True, 'value': 'B58886C02FFA16885AC661DCD59D8B38:FG=1'}, {'domain': '.baidu.com', 'expiry': 1649987886, 'httpOnly': False, 'name': 'BAIDUID', 'path': '/', 'secure': False, 'value': 'B58886C02FFA16885AC661DCD59D8B38:FG=1'} ] # time.sleep(10) # try: # for cookie in selenium_cookies: # driver.add_cookie(cookie) # except BaseException: # time.sleep(10) # for cookie in selenium_cookies: # driver.add_cookie(cookie) WebDriverWait(driver,20).until(add_cookies()) driver.refresh() page_source=WebDriverWait(driver,20).until(page_source_exisits()) print(page_source) driver.quit()
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider7_Selenium2.py
SampleSprider7_Selenium2.py
#define CO_MAXBLOCKS 40 from selenium import webdriver from selenium.webdriver.chrome.options import Options from sampleSprider.SampleSprider9_Pinduoduo1 import main import time from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') log = logging.getLogger(__name__) headers = { 'Connection': 'keep-alive', 'Cache-Control': 'no-cache', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36', 'Accept': 'application/json, text/plain, */*', 'Referer': 'https://static.pddpic.com/', 'Accept-Language': 'zh-CN,zh;q=0.9', 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"', 'Origin': 'http://mobile.pinduoduo.com', 'sec-ch-ua-mobile': '?0', 'authority': 'th.pinduoduo.com', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36', 'content-type': 'text/plain;charset=UTF-8', 'accept': '*/*', 'origin': 'http://mobile.pinduoduo.com', 'sec-fetch-site': 'cross-site', 'sec-fetch-mode': 'cors', 'sec-fetch-dest': 'empty', 'referer': 'http://mobile.pinduoduo.com/', 'accept-language': 'zh-CN,zh;q=0.9', 'AccessToken': 'TDTVO6CI6S32K6UCKGJELOUUETBIFO6PWKA5JXSKB3CVX6OSF2XA1103853', 'Content-Type': 'application/json;charset=UTF-8', 'Pragma': 'no-cache', } with open('D://SampleSprider/pingduoduo_cookies.txt','r') as file: cookies=eval(file.read()) class click_jiaoshui(object): def __call__(self,driver): try: jiaoshui=driver.find_element_by_xpath('//*[@id="waterbottle"]/div[3]/div[1]/div/div/img') jiaoshui.click() return True except BaseException: try: jiaoshui2=driver.find_element_by_xpath('//*[@id="waterbottle"]/div[4]') shengyu=jiaoshui2.text log.info('还剩余{}水'.format(shengyu)) if int(str(shengyu).strip('g'))<10: log.error('剩余水不足,停止浇水。。。') return 2 jiaoshui2.click() return True except BaseException: return False class click_lingshuidi(object): def __call__(self,driver): try: lingshuidi=driver.find_element_by_xpath('//*[@id="MissionListIcon"]/div/div/div[2]/img') lingshuidi.click() return True except BaseException: return False class click_meirishuidi(object): def __call__(self,driver): try: meirishuidi=driver.find_element_by_xpath('//*[@id="missionlist-36155"]/div[3]/div/button') meirishuidi.click() log.info('领取每日水滴成功。。。') WebDriverWait(driver,5).until(close_meirishuidi()) return True except BaseException: return False class close_meirishuidi(object): def __call__(self,driver): try: close=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[77]/div/div[2]/div[2]/div/div/img') jixu=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[77]/div/div[2]/div[3]/div[4]/button[2]') jixu.click() log.info('关闭 每日水滴 提示成功。。。') return True except BaseException: return False # 领取左方的30分钟满水盆 class click_shuipen(object): def __call__(self, driver): try: shuipen=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[2]/div[1]/div[1]/div/div[3]/div/div[1]/img') shuipen.click() try: time.sleep(1) try: weimanyig=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[92]/div/div[2]/div[2]/div[5]') except BaseException: 
weimanyig=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[94]/div/div[2]/div[2]/div[5]') weimanyig.click() log.error('水盆未满1g不可领取') return 2 except BaseException: log.info('领取水盆里的水滴成功。。。。') return 1 except BaseException: return False # 领取树右方的每日圆水瓶 class click_yuanping(object): def __call__(self, driver): try: try: yuanping=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[2]/div[1]/div[6]/div[3]/div/div/div[4]/div') except BaseException: try: yuanping=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[2]/div[1]/div[6]/div[3]/div/div/div[4]/div/div[5]/div/span[2]') except BaseException: driver.find_element_by_xpath('//*[@id="fruit-container"]/div[2]/div[1]/div[6]/div[3]/div/div/div[4]') ActionChains(driver).move_to_element(yuanping).click().perform() try: try: yijiaoshui=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[172]/div/div[2]/button') yijiaoshui.click() log.error('今日已领取过圆瓶里的水滴了。。。。') return 2 except BaseException: time.sleep(1) yijiaoshui=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[171]/div/div[2]/button') yijiaoshui.click() try: chacha=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[75]/div/div[2]/div/div[2]/div/div/img') chacha.click() log.error('剩余水滴不足') return True except BaseException: log.error('今日已领取过圆瓶里的水滴了。。。。') return 2 except BaseException: log.info('领取圆瓶里的水滴成功。。。。') return 1 except BaseException: return False # 打卡集水滴 class click_daka(object): def __call__(self, driver): try: try: daka=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[1]/div[1]/div[3]/div[2]/div/div/div/div[3]') except BaseException: daka=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[1]/div[1]/div[3]/div[2]/div/div/div') daka.click() try: daka_next=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[139]/div/div[2]/div[4]/div[3]') except BaseException: daka_next=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[138]/div/div[2]/div[4]/div[3]') daka_next.click() chacha=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[138]/div/div[2]/div[1]/div/div/img') chacha.click() if daka_next.text == '今日已打卡': log.error('今日已打卡...') else: log.info('打卡集水滴成功。。。。') return True except BaseException: return False # 每日三餐开福袋 class click_sancanfudai(object): def __call__(self, driver): try: sancanfudai=driver.find_element_by_xpath('//*[@id="14"]') ActionChains(driver).move_to_element(sancanfudai).click().perform() qujiaoshui=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[35]/div[2]/div[4]/button') qujiaoshui.click() log.info('三餐福袋领取成功。。。。') return True except BaseException: return False # 浇水竞赛 class click_jiaoshuijingsai(object): def __call__(self, driver): try: jiaoshuijingsai=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[1]/div[1]/div[3]/div[1]/div/div/div/div[1]/img') ActionChains(driver).move_to_element(jiaoshuijingsai).click().perform() log.info('点击浇水竞赛.....') state=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[118]/div[1]/div/div[3]/div[2]/div[2]/div[2]') state_text=state.text if state_text!='待完成': state.click() log.info('浇水竞赛前三名奖励领取成功') 
chacha=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[118]/div[1]/div/div[3]/div[1]/div[2]/div/div/img') chacha.click() log.error('浇水竞赛未达到前三名....') return True except BaseException: return False # 幸运红包 class click_xingyunhongbao(object): def __call__(self, driver): try: xingyunhongbao=driver.find_element_by_xpath('//*[@id="progress-bar-id"]/div/div[2]/div/div/div/div[2]/div[2]/img') xingyunhongbao.click() log.info('正在点击幸运红包 。。。') return True except BaseException: return False class click_toushui(object): def __call__(self, driver): try: toushui=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[2]/div[1]/div[3]/div/div[4]/div[2]/img') toushui.click() log.info('点击偷水。。。') return True except BaseException: return False class visit_friends(object): def __call__(self, driver): try: friends_list=driver.find_element_by_xpath('//*[@id="friendList"]').find_elements_by_class_name('friend-list-item') for friend in friends_list: friend_name=friend.find_element_by_class_name('friend-name').text if friend_name != '删除好友': friend.click() try: WebDriverWait(driver,5).until(click_toushui()) except BaseException: driver.get(url) log.info('访问所有好友成功...') huijia=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[1]/div[1]/div[2]/div/div[1]/div/div[1]/img') huijia.click() return True except BaseException: return False # 领化肥 class click_linghuafei(object): def __call__(self, driver): try: linghuafei=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[1]/div[1]/div[2]/div/div[3]/div[2]/img') linghuafei.click() log.info('正在点击 领化肥。。。') time.sleep(1) try: linghuafei_daka=driver.find_element_by_xpath('//*[@id="missionlist-36069"]/div[3]/div/button') linghuafei_daka.click() log.info('成功打卡领化肥。。。') except BaseException: log.error('已打卡,稍后再点。。。') liulan=driver.find_element_by_xpath('//*[@id="missionlist-36029"]/div[3]/div[2]/button') liulan.click() return True except BaseException: return False if __name__=='__main__': url='http://mobile.pinduoduo.com/garden_home.html?_pdd_fs=1&_pdd_tc=676666&_pdd_sbs=1&fun_id=app_home&refer_page_el_sn=15079&refer_page_name=login&refer_page_id=10169_1619419956651_sowf5xitxz&refer_page_sn=10169' options=webdriver.ChromeOptions() for header in headers: options.add_argument('{}="{}"'.format(header,headers[header])) chrome_options = Options() chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222") # 前面设置的端口号 driver = webdriver.Chrome(executable_path='D://谷歌驱动/chromedriver.exe', options=chrome_options) log.info('开始访问拼多多果园。。。。') driver.get(url) driver.maximize_window() # for cookie in cookies: # driver.add_cookie(cookie) # log.info('添加cookies后访问。。。。') # driver.get(url) # cookies失效或者第一次使用 if driver.current_url.__contains__('login.html'): log.info('cookies失效或者第一次使用。。。。') main(driver) log.info('进入页面成功。。。') current_hour=time.strftime("%H", time.localtime()) log.info('当前时间为{}点。。。'.format(current_hour)) if int(current_hour) in (7,8,9,12,13,14,18,19,20,21): try: WebDriverWait(driver,3).until(click_sancanfudai()) except BaseException: log.error('未弹出三餐福袋或已领取过。。。') # log.info('开始做任务。。。') # WebDriverWait(driver,5).until(click_lingshuidi()) # WebDriverWait(driver,5).until(click_meirishuidi()) # # driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[2]/div[2]/div[3]/div[4]').find_element_by_class_name('title') try: WebDriverWait(driver,5).until(click_shuipen()) except BaseException: log.error('界面找不到集水器。。。') try: 
WebDriverWait(driver,5).until(click_yuanping()) except BaseException: log.error('界面找不到圆瓶。。。') try: WebDriverWait(driver,5).until(click_linghuafei()) except BaseException: log.error('界面找不到领化肥。。。') try: WebDriverWait(driver,5).until(click_daka()) driver.get(url) except BaseException: log.error('界面找不到打卡集水滴') try: WebDriverWait(driver,5).until(click_jiaoshuijingsai()) except BaseException: log.error('界面找不到浇水竞赛') try: WebDriverWait(driver,5).until(visit_friends()) except BaseException: log.error('界面找不到好友列表') for i in range(0,100000): try: res=WebDriverWait(driver,5).until(click_jiaoshui()) if res==2: break log.info('正在点击浇水第{}次。。。'.format(i)) except BaseException: driver.get(url) driver.get(url) try: WebDriverWait(driver,4).until(click_xingyunhongbao()) driver.get(url) except BaseException: log.error('界面找不到幸运红包')
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider9_Pinduoduo3.py
SampleSprider9_Pinduoduo3.py
import requests
from bs4 import BeautifulSoup


def main():
    r = requests.get('https://www.vbiquge.com/15_15338/8549128.html')
    r.encoding = 'utf-8'
    str_html = r.text
    # str_html = html = """
    # <html>
    # <head>
    # <title>Jack_Cui</title>
    # </head>
    # <body>
    # <p class="title" name="blog"><b>My Blog</b></p>
    # <li><!--comment--></li>
    # <a href="http://blog.csdn.net/c406495762/article/details/58716886" class="sister" id="link1">Python3 web crawler (1): simple page fetching with urllib</a><br/>
    # <a href="http://blog.csdn.net/c406495762/article/details/59095864" class="sister" id="link2">Python3 web crawler (2): sending data with urllib.urlopen</a><br/>
    # <a href="http://blog.csdn.net/c406495762/article/details/59488464" class="sister" id="link3">Python3 web crawler (3): urllib.error exceptions</a><br/>
    # </body>
    # </html>
    # """
    # It is best to specify a parser explicitly: "lxml", "lxml-xml", "html.parser", "html5lib", or "html", "html5", "xml"
    soup = BeautifulSoup(str_html, 'lxml')
    # Pretty-print the document
    # print(soup.prettify())

    # 1. Tag corresponds to an HTML tag; attribute access returns only the first matching tag
    # print(type(soup.title))  # <class 'bs4.element.Tag'>
    # print(soup.title)
    # print(soup.head)
    # print(soup.head.title)
    # print(soup.a)
    # The Tag class has two attributes, name and attrs: name returns the tag name; attrs returns a dict of all the tag's attributes
    # print(type(soup.title.name))  # <class 'str'>
    # print(type(soup.title.attrs))  # <class 'dict'>
    # print(soup.title.name)
    # print(soup.title.attrs)
    # print(soup.meta)
    # print(soup.meta.name)
    # print(soup.meta.attrs)

    # 2. NavigableString holds the text value of a tag
    # print(type(soup.title.string))  # <class 'bs4.element.NavigableString'>
    # print(soup.title.string)

    # 3. BeautifulSoup represents the whole document; it can be treated as a special Tag and also has name and attrs
    # print(type(soup))  # <class 'bs4.BeautifulSoup'>
    # print(soup.name)  # [document]
    # print(soup.attrs)  # {}

    # 4. Comment is a special NavigableString type for a tag whose content is a comment;
    # .string strips the comment markers automatically, so it sometimes needs manual handling
    # print(type(soup.li))  # <class 'bs4.element.Tag'>
    # print(soup.li)  # <li><!--comment--></li>
    # print(type(soup.li.string))  # <class 'bs4.element.Comment'>
    # print(soup.li.string)  # comment
    # if type(soup.li.string).__name__ == 'Comment':
    #     print("<!--{}-->".format(soup.li.string))
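A compact, runnable recap of the four bs4 classes walked through above (self-contained, stdlib parser, no network access):

    from bs4 import BeautifulSoup, Comment

    html = '<html><head><title>T</title></head><body><li><!--note--></li></body></html>'
    soup = BeautifulSoup(html, 'html.parser')

    print(type(soup))               # <class 'bs4.BeautifulSoup'>
    print(type(soup.title))         # <class 'bs4.element.Tag'>
    print(type(soup.title.string))  # <class 'bs4.element.NavigableString'>
    print(type(soup.li.string))     # <class 'bs4.element.Comment'>
    assert isinstance(soup.li.string, Comment)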
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider1_fourClass.py
SampleSprider1_fourClass.py
from bs4 import BeautifulSoup
import requests
import re
from urllib.request import urlretrieve
import os
import sys


# Crawl the comic images of one chapter
def getOneChapter(url, chapterName, name):
    r = requests.get(url)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    # The page is loaded dynamically (an anti-scraping measure): the image
    # addresses are scattered inside a script tag, so use regexes to pull out
    # the three URL segments
    str_script_oneChapter = soup.find_all('script', attrs={'type': 'text/javascript'})[0].string
    str_first_url = re.findall(r'\|+(\d{4})\|+', str_script_oneChapter)[0]
    str_second_url = re.findall(r'\|(\d{5})\|', str_script_oneChapter)[0]
    str_third_urls = re.findall(r'(\d{13,14})\|', str_script_oneChapter)
    # At this point we have all the image URL fragments, but out of order, so sort them
    str_third_urls.sort()
    # Used as the image file name, since single images have no name of their own
    pic_name = 1
    for str_third_url in str_third_urls:
        url_pic = 'https://images.dmzj1.com/img/chapterpic/' + str_first_url + '/' + str_second_url + '/' + str_third_url + '.jpg'
        downloadPic(url_pic, chapterName, pic_name, name)
        pic_name = pic_name + 1


# Download the image at the given URL
def downloadPic(url_pic, chapterName, pic_name, name):
    # Some characters are not allowed in Windows directory names, so strip them
    chapterName = re.sub('[.|\\|/|:|*|?|"|<|>|\|]', '', chapterName)
    path = 'D://{}/{}'.format(name, chapterName)
    # We cannot write into a directory that does not exist yet, so create it first
    mkdir(path)
    # Retry when the connection drops (the original retry call was missing the
    # `name` argument, which would raise a TypeError; fixed here)
    try:
        urlretrieve(url_pic, path + '/{}.jpg'.format(pic_name))
    except BaseException:
        print(chapterName + '\t' + 'connection failed, retrying...')
        downloadPic(url_pic, chapterName, pic_name, name)


# Create the directory
def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path.encode('utf-8'))


def main(url='https://www.dmzj.com/info/yaoshenji.html', name='妖神记'):
    r = requests.get(url)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    tag_ul_allChapters = soup.find_all('ul', attrs={'class': 'list_con_li autoHeight'})[1]
    tag_a_allChapters = tag_ul_allChapters.find_all('a')
    # So far so good: we now have, for every chapter, the tag holding its URL and name
    for tag_a_allChapter in tag_a_allChapters:
        # Chapter URL
        str_url_chapter = tag_a_allChapter.attrs['href']
        # Chapter name
        str_title_chapter = tag_a_allChapter.find_all('span', attrs={'class': 'list_con_zj'})[0].text
        getOneChapter(str_url_chapter, str_title_chapter, name)
        print(str_title_chapter + '\t' + 'download complete.')
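downloadPic above retries through unbounded recursion, which can exhaust the recursion limit on a persistently bad link. A bounded, iterative sketch (the helper name and retry count are illustrative, not part of the original script):

    from urllib.request import urlretrieve

    def download_with_retries(url_pic, target_path, max_retries=5):
        # Iterative retry loop: no recursion-depth risk, gives up after max_retries.
        for attempt in range(1, max_retries + 1):
            try:
                urlretrieve(url_pic, target_path)
                return True
            except OSError:
                print('attempt {} failed for {}'.format(attempt, url_pic))
        return False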
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider4_Comics.py
SampleSprider4_Comics.py
from selenium import webdriver from selenium.webdriver.common.keys import Keys import time import os from sampleSprider.Common.Info import Taobao headers = { 'authority': 'gma.alicdn.com', 'cache-control': 'no-cache', 'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8', 'sec-fetch-site': 'same-site', 'sec-fetch-mode': 'no-cors', 'sec-fetch-user': '?1', 'sec-fetch-dest': 'image', 'referer': 'https://a1.alicdn.com/', 'accept-language': 'zh-CN,zh;q=0.9', 'Referer': 'https://a1.alicdn.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'Origin': 'https://cart.taobao.com', 'Pragma': 'no-cache', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Sec-WebSocket-Key': 'pgWIzG+W79r6suOOQTMNQQ==', 'Upgrade': 'websocket', 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits', 'Cache-Control': 'no-cache', 'Connection': 'Upgrade', 'Sec-WebSocket-Version': '13', 'if-none-match': 'W/"1efe-gibOP5kr+x7GzP3kayA6Wzcii4g"', 'if-modified-since': 'Wed, 18 Jan 2017 08:06:30 GMT', 'pragma': 'no-cache', 'content-type': 'text/plain', 'origin': 'https://cart.taobao.com', } def main(): try: options=webdriver.ChromeOptions() for header in headers: options.add_argument('{}="{}"'.format(header,headers[header])) driver=webdriver.Chrome('D://谷歌驱动/chromedriver.exe') driver.get('https://cart.taobao.com/cart.htm?spm=a1z02.1.1997525049.1.U6BJfg&from=mini&ad_id=&am_id=&cm_id=&pm_id=1501036000a02c5c3739') username=driver.find_element_by_xpath('//*[@id="fm-login-id"]') username.send_keys(Taobao[0]) password=driver.find_element_by_xpath('//*[@id="fm-login-password"]') password.send_keys(Taobao[1]) password.send_keys(Keys.RETURN) time.sleep(5) print(driver.get_cookies()) path='D://SampleSprider/' os.makedirs(path,exist_ok=True) with open(path+'taobao_cookies.txt','w') as file: file.write(driver.get_cookies().__str__()) driver.quit() except BaseException: main()
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider8_Taobao1.py
SampleSprider8_Taobao1.py
from selenium import webdriver from selenium.webdriver.common.keys import Keys import time from selenium.webdriver.support.ui import Select from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from sampleSprider.Common.Info import Tieba # 设置页面加载策略为 在加载完html后开始执行selenium desired_capabilities = DesiredCapabilities.CHROME desired_capabilities["pageLoadStrategy"] = "none" def main(): option=webdriver.ChromeOptions() option.add_argument('user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36"') driver=webdriver.Chrome('D://谷歌驱动/chromedriver.exe',options=option) driver.get('https://tieba.baidu.com/') time.sleep(6) zhmmdl=driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_4__footerULoginBtn"]') zhmmdl.click() time.sleep(3) user_name=driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_4__userName"]') user_name.send_keys(Tieba[0]) password=driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_4__password"]') password.send_keys(Tieba[1]) dl=driver.find_element_by_xpath('//*[@id="TANGRAM__PSP_4__submit"]') dl.click() time.sleep(5) cookies=driver.get_cookies() print(cookies) # select=driver.find_element_by_xpath('//*[@id="plc_frame"]/div[2]/div[2]/p[1]/select') # options=select.find_elements_by_tag_name('option') # for option in options: # print(option.text) # option.click() # 操作select元素 # select=Select(driver.find_element_by_xpath('//*[@id="plc_frame"]/div[2]/div[2]/p[1]/select')) # # select.select_by_index(2) # options=select.all_selected_options # for option in options: # print(option.text) # alert=driver.switch_to.alert # print(alert.text) # for handle in driver.window_handles: # driver.switch_to.window() # 浏览器后退和前进 # elem=driver.find_element_by_xpath('//*[@id="weibo_top_public"]/div/div/div[2]/input') # elem.send_keys('赵立坚') # elem.send_keys(Keys.RETURN) # time.sleep(2) # driver.back() # time.sleep(3) # driver.forward()
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider7_Selenium.py
SampleSprider7_Selenium.py
from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from sampleSprider.Common.Info import Paypal from selenium.webdriver.common.action_chains import ActionChains import time headers = { 'authority': 'gma.alicdn.com', 'cache-control': 'no-cache', 'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8', 'sec-fetch-site': 'same-site', 'sec-fetch-mode': 'no-cors', 'sec-fetch-user': '?1', 'sec-fetch-dest': 'image', 'referer': 'https://a1.alicdn.com/', 'accept-language': 'zh-CN,zh;q=0.9', 'Referer': 'https://a1.alicdn.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'Origin': 'https://cart.taobao.com', 'Pragma': 'no-cache', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Sec-WebSocket-Key': 'pgWIzG+W79r6suOOQTMNQQ==', 'Upgrade': 'websocket', 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits', 'Cache-Control': 'no-cache', 'Connection': 'Upgrade', 'Sec-WebSocket-Version': '13', 'if-none-match': 'W/"1efe-gibOP5kr+x7GzP3kayA6Wzcii4g"', 'if-modified-since': 'Wed, 18 Jan 2017 08:06:30 GMT', 'pragma': 'no-cache', 'content-type': 'text/plain', 'origin': 'https://cart.taobao.com', } with open('D://SampleSprider/taobao_cookies.txt','r') as file: cookies=eval(file.read()) class click_jiesuan(object): def __call__(self,driver): try: if driver.title.__contains__('确认订单'): return True else: span_jiesuan.click() return False except BaseException: return False class enter_mima(object): def __call__(self,driver): try: span_mima=driver.find_element_by_xpath('//*[@id="payPassword_container"]/div/span') return span_mima except BaseException: return False def main(): options=webdriver.ChromeOptions() for header in headers: options.add_argument('{}="{}"'.format(header,headers[header])) driver=webdriver.Chrome('D://谷歌驱动/chromedriver.exe') driver.maximize_window() driver.get('https://cart.taobao.com/cart.htm?spm=a1z02.1.1997525049.1.U6BJfg&from=mini&ad_id=&am_id=&cm_id=&pm_id=1501036000a02c5c3739') for cookie in cookies: driver.add_cookie(cookie) #driver.refresh() driver.get('https://cart.taobao.com/cart.htm?spm=a1z02.1.1997525049.1.U6BJfg&from=mini&ad_id=&am_id=&cm_id=&pm_id=1501036000a02c5c3739') time.sleep(3) # 全选按钮 label_all_select=driver.find_element_by_xpath('//*[@id="J_SelectAll1"]/div/label') label_all_select.click() # 结算按钮 span_jiesuan=driver.find_element_by_xpath('//*[@id="J_Go"]/span') WebDriverWait(driver,20).until(click_jiesuan()) # 提交订单按钮 a_tijiao=driver.find_element_by_xpath('//*[@id="submitOrderPC_1"]/div/a[2]') a_tijiao.click() # 默认选择支付宝 # 输入支付宝密码 span_mima=WebDriverWait(driver,20).until(enter_mima()) ActionChains(driver).move_to_element(span_mima).click().send_keys(Paypal[0]).perform() # 确认付款 input_fukuan=driver.find_element_by_xpath('//*[@id="J_authSubmit"]') input_fukuan.click()
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider8_Taobao2.py
SampleSprider8_Taobao2.py
#define CO_MAXBLOCKS 40 from selenium import webdriver from selenium.webdriver.chrome.options import Options from sampleSprider.SampleSprider9_Pinduoduo1 import main import time from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') log = logging.getLogger(__name__) headers = { 'Connection': 'keep-alive', 'Cache-Control': 'no-cache', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36', 'Accept': 'application/json, text/plain, */*', 'Referer': 'https://static.pddpic.com/', 'Accept-Language': 'zh-CN,zh;q=0.9', 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"', 'Origin': 'http://mobile.pinduoduo.com', 'sec-ch-ua-mobile': '?0', 'authority': 'th.pinduoduo.com', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36', 'content-type': 'text/plain;charset=UTF-8', 'accept': '*/*', 'origin': 'http://mobile.pinduoduo.com', 'sec-fetch-site': 'cross-site', 'sec-fetch-mode': 'cors', 'sec-fetch-dest': 'empty', 'referer': 'http://mobile.pinduoduo.com/', 'accept-language': 'zh-CN,zh;q=0.9', 'AccessToken': 'TDTVO6CI6S32K6UCKGJELOUUETBIFO6PWKA5JXSKB3CVX6OSF2XA1103853', 'Content-Type': 'application/json;charset=UTF-8', 'Pragma': 'no-cache', } with open('D://SampleSprider/pingduoduo_cookies.txt','r') as file: cookies=eval(file.read()) class click_jiaoshui(object): def __call__(self,driver): try: jiaoshui=driver.find_element_by_xpath('//*[@id="waterbottle"]/div[3]/div[1]/div/div/img') jiaoshui.click() return True except BaseException: try: jiaoshui2=driver.find_element_by_xpath('//*[@id="waterbottle"]/div[4]') jiaoshui2.click() return True except BaseException: return False class click_lijicunru(object): def __call__(self,driver): try: lijicunru=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[57]/div/div[2]/div[3]/div/div[4]') lijicunru.click() return True except BaseException: return False class click_zhidaole(object): def __call__(self,driver): try: zhidaole=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[57]/div/div[2]/div[3]/div/div[3]') zhidaole.click() return True except BaseException: return False class fiveStates(object): def __call__(self,driver): try: lianxuqiandao=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[134]/div/div[2]/div[1]/div/div/img') lianxuqiandao.click() log.error('关闭 连续签到 提示成功。。。') return True except BaseException: try: huafeilibao=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[33]/div/div/div[3]/div[1]/div') huafeilibao.click() log.error('关闭 化肥礼包 提示成功。。。') return True except BaseException: try: jiaoshuijiabei=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[169]/div/div[2]/button') jiaoshuijiabei.click() log.error('关闭 水满加倍 提示成功。。。') return True except BaseException: try: zhuanpan=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[173]/div/div/div[2]/div/div[1]/div/div/div/img') zhuanpan.click() log.error('关闭 转盘抽奖 提示成功。。。') return True except BaseException: try: # 养分 
qujiaoshui2=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[2]/div[2]/button') gongxihuode2=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[2]/div[2]/div[2]').text qujiaoshui2.click() log.error('关闭 {} 提示成功。。。'.format(gongxihuode2)) return True except BaseException: return False class click_lingshuidi(object): def __call__(self,driver): try: lingshuidi=driver.find_element_by_xpath('//*[@id="MissionListIcon"]/div/div/div[2]/img') lingshuidi.click() return True except BaseException: return False class click_meirishuidi(object): def __call__(self,driver): try: meirishuidi=driver.find_element_by_xpath('//*[@id="missionlist-36155"]/div[3]/div/button') meirishuidi.click() log.info('领取每日水滴成功。。。') WebDriverWait(driver,5).until(close_meirishuidi()) return True except BaseException: return False class close_meirishuidi(object): def __call__(self,driver): try: close=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[77]/div/div[2]/div[2]/div/div/img') jixu=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[77]/div/div[2]/div[3]/div[4]/button[2]') jixu.click() log.info('关闭 每日水滴 提示成功。。。') return True except BaseException: return False if __name__=='__main__': url='http://mobile.pinduoduo.com/garden_home.html?_pdd_fs=1&_pdd_tc=676666&_pdd_sbs=1&fun_id=app_home&refer_page_el_sn=15079&refer_page_name=login&refer_page_id=10169_1619419956651_sowf5xitxz&refer_page_sn=10169' options=webdriver.ChromeOptions() for header in headers: options.add_argument('{}="{}"'.format(header,headers[header])) chrome_options = Options() chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222") # 前面设置的端口号 driver = webdriver.Chrome(executable_path='D://谷歌驱动/chromedriver.exe', options=chrome_options) log.info('开始访问拼多多果园。。。。') driver.get(url) driver.maximize_window() # for cookie in cookies: # driver.add_cookie(cookie) # log.info('添加cookies后访问。。。。') # driver.get(url) # cookies失效或者第一次使用 if driver.current_url.__contains__('login.html'): log.info('cookies失效或者第一次使用。。。。') main(driver) log.info('进入页面成功。。。') # log.info('开始做任务。。。') # WebDriverWait(driver,5).until(click_lingshuidi()) # WebDriverWait(driver,5).until(click_meirishuidi()) # # driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[2]/div[2]/div[3]/div[4]').find_element_by_class_name('title') for i in range(0,100000): try: WebDriverWait(driver,8).until(click_jiaoshui()) log.info('正在点击浇水第{}次。。。'.format(i)) except BaseException: try: WebDriverWait(driver,3).until(fiveStates()) except BaseException: try: jingxishuihu=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[3]/div[2]/div[3]/button') jingxishuihu.click() log.error('关闭 惊喜水壶 提示成功。。。') except BaseException: try: # 水滴 qujiaoshui=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[2]/div[2]/button') gongxihuode=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[2]/div[2]/div[2]/div[1]/span').text qujiaoshui.click() log.error('关闭 {} 提示成功。。。'.format(gongxihuode)) except BaseException: try: zhuanpanfanbei=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[56]/div/div[2]/div[2]/div[3]/img') zhuanpanfanbei.click() WebDriverWait(driver,5).until(click_lijicunru()) WebDriverWait(driver,5).until(click_zhidaole()) 
log.error('关闭 送你水滴翻倍福利 提示成功。。。') except BaseException: try: guoshuzhangguozi=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[5]/div/div[2]/div[2]') guoshuzhangguozi.click() log.error('关闭 果树长出果子了 提示成功。。。') except BaseException: try: xingyunhongbao=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[106]/div[4]/div[3]/button') xingyunhongbao.click() log.error('关闭 幸运红包 提示成功。。。') except BaseException: shuidibuzu=driver.find_element_by_xpath('//*[@id="fruit-container"]/div[4]/div/div/div[2]/div[9]/div[32]/div[76]/div/div[2]/div/div[2]/div/div/img') shuidibuzu.click() log.error('关闭 水滴 提示成功。。。') break
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider9_Pinduoduo2.py
SampleSprider9_Pinduoduo2.py
from selenium import webdriver
import time

headers = {
    'Connection': 'keep-alive',
    'Cache-Control': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
    'Accept': 'application/json, text/plain, */*',
    'Referer': 'https://static.pddpic.com/',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
    'Origin': 'http://mobile.pinduoduo.com',
    'sec-ch-ua-mobile': '?0',
    'authority': 'th.pinduoduo.com',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
    'content-type': 'text/plain;charset=UTF-8',
    'accept': '*/*',
    'origin': 'http://mobile.pinduoduo.com',
    'sec-fetch-site': 'cross-site',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'http://mobile.pinduoduo.com/',
    'accept-language': 'zh-CN,zh;q=0.9',
    'AccessToken': 'TDTVO6CI6S32K6UCKGJELOUUETBIFO6PWKA5JXSKB3CVX6OSF2XA1103853',
    'Content-Type': 'application/json;charset=UTF-8',
    'Pragma': 'no-cache',
}


def main(driver):
    url = 'http://mobile.pinduoduo.com/garden_home.html?_pdd_fs=1&_pdd_tc=676666&_pdd_sbs=1&fun_id=app_home&refer_page_el_sn=15079&refer_page_name=login&refer_page_id=10169_1619419956651_sowf5xitxz&refer_page_sn=10169'
    # Cookies have expired, or this is the first run
    if 'login.html' in driver.current_url:
        # Log in with a phone number
        phone_denglu = driver.find_element_by_xpath('//*[@id="first"]/div[2]/div')
        phone_denglu.click()
        phone_number = driver.find_element_by_xpath('//*[@id="user-mobile"]')
        phone_number.send_keys('13566421619')
        send_yzm = driver.find_element_by_xpath('//*[@id="code-button"]')
        time.sleep(1)
        send_yzm.click()
        yzm = input('Enter the SMS verification code: ')
        input_yzm = driver.find_element_by_xpath('//*[@id="input-code"]')
        input_yzm.send_keys(yzm)
        denglu = driver.find_element_by_xpath('//*[@id="submit-button"]')
        denglu.click()
        time.sleep(3)
        with open('D://SampleSprider/pingduoduo_cookies.txt', 'w') as file:
            file.write(str(driver.get_cookies()))
        print(driver.get_cookies())
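The login helper above persists driver.get_cookies() as a Python literal. A matching sketch for loading the file back into a fresh driver session (ast.literal_eval is a safer parse than the eval used elsewhere in this package; the function name is illustrative):

    import ast

    def load_cookies(driver, path='D://SampleSprider/pingduoduo_cookies.txt'):
        # Parse the saved list of cookie dicts and attach each one to the driver.
        with open(path, 'r') as f:
            for cookie in ast.literal_eval(f.read()):
                driver.add_cookie(cookie)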
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider9_Pinduoduo1.py
SampleSprider9_Pinduoduo1.py
from bs4 import BeautifulSoup import requests import re from urllib.request import urlretrieve import os import sys import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') log = logging.getLogger(__name__) # 该程序设置了运行参数,主程序不再改动,从数组第二个参数开始设置,分别为漫画章节目录地址 漫画名字 # sys.argv =[sys.argv[0],'https://www.dmzj.com/info/dengpaozhixin2.html','灯泡之心'] #sys.argv =[sys.argv[0],'https://www.dmzj.com/info/yaoshenji.html','妖神记'] # sys.argv =[sys.argv[0],'https://www.dmzj.com/info/aaguziyaoruxuejuedouxueyuandeyangzi.html','【AA】咕哒子要入学决斗学院的样子'] # 爬取一章的漫画图片 def getOneChapter(url,chapterName,name): r= requests.get(url) r.encoding='utf-8' soup=BeautifulSoup(r.text,'lxml') # 该页面采取动态加载,反爬手段之一,图片地址散落在script里,,用正则找出三段url list_script_oneChapter=soup.find_all('script',attrs={'type':'text/javascript'}) # 部分章节资源出错 if list_script_oneChapter.__len__()==0: log.error('{} 链接出错'.format(chapterName)) return ; else: str_script_oneChapter=list_script_oneChapter[0].string tuple_oneChapter=re.findall("return p}.*{(.*)}.*',(\d+),(\d+),'(.*)'\.split",str_script_oneChapter)[0] list_rules=tuple_oneChapter[0].split(',')[2].split(':')[1].strip('"').split('\\\\/') radix=tuple_oneChapter[1] len=tuple_oneChapter[2] list_part_url=tuple_oneChapter[3].split('|') url_pic='https://images.dmzj1.com/' # 用作图片的名称使用,因为现在单张图片没有名字 pic_name=1 for list_rule in list_rules: if '.' in list_rule: list_finally_url=list_rule.split('\\\\')[0].split('.') url_pic+=list_part_url[sixtyTwoToDecimal(list_finally_url[0])]+'.'+list_part_url[sixtyTwoToDecimal(list_finally_url[1])] downloadPic(url_pic,chapterName,pic_name,name) pic_name+=1 url_pic='https://images.dmzj1.com/img/' else: url_pic=url_pic+list_part_url[sixtyTwoToDecimal(list_rule)]+'/' # 下载该url下的图片 def downloadPic(url_pic,chapterName,pic_name,name): # 因为windows下有一些特殊字符不能作为目录名,所以需剔除 chapterName=re.sub('[.|\\|/|:|*|?|"|<|>|\|]','',chapterName) path='D://{}/{}'.format(name,chapterName) # 因为不能写入未创建的目录,所以需要先创建目录 mkdir(path) # 防止连接断开,进行重连操作 try: urlretrieve(url_pic,path+'/{}.jpg'.format(pic_name)) except BaseException: log.error(chapterName+'\t'+url_pic+'连接失败,正在重新连接。。。。') downloadPic(url_pic,chapterName,pic_name,name) # 创建目录 def mkdir(path): if not os.path.exists(path): os.makedirs(path.encode('utf-8')) # 六十二进制 转 十进制 def sixtyTwoToDecimal(n): n_len=len(n) if n_len==1: return oneCharToInt(n) res=0 for i in range(0,n_len): res+=pow(62,n_len-1-i)*oneCharToInt(n[i:i+1]) return res # 一个字符 六十二进制 转 十进制 def oneCharToInt(n): ascii_n=ord(n) if ascii_n>=48 and ascii_n<=57: return ascii_n-48 elif ascii_n>=97 and ascii_n<=122: return ascii_n-87 elif ascii_n>=65 and ascii_n<=90: return ascii_n-29 def main(url='https://www.dmzj.com/info/yaoshenji.html',name='妖神记'): r= requests.get(url) r.encoding='utf-8' soup=BeautifulSoup(r.text,'lxml') tag_ul_allChapters=soup.find_all('ul',attrs={'class':'list_con_li autoHeight'})[1] tag_a_allChapters=tag_ul_allChapters.find_all('a') # 目前一切顺利 已经找到每一章节 的地址和名称 标签对 for tag_a_allChapter in tag_a_allChapters: # 章节url str_url_chapter=tag_a_allChapter.attrs['href'] # 章节名称 str_title_chapter=tag_a_allChapter.find_all('span',attrs={'class':'list_con_zj'})[0].text getOneChapter(str_url_chapter,str_title_chapter,name) log.info(str_title_chapter+'\t'+'下载完成。')
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider4_Comics1.py
SampleSprider4_Comics1.py
import requests headers = { 'authority': 'gma.alicdn.com', 'cache-control': 'no-cache', 'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8', 'sec-fetch-site': 'same-site', 'sec-fetch-mode': 'no-cors', 'sec-fetch-user': '?1', 'sec-fetch-dest': 'image', 'referer': 'https://a1.alicdn.com/', 'accept-language': 'zh-CN,zh;q=0.9', 'cookie': 't=9c4249a362a7567ff54490700617c0ca; xlly_s=1; thw=cn; cna=6UT8GGIdq0kCAXBAg2VSYHgZ; _samesite_flag_=true; cookie2=122c59e0073722ea70a887147a096ecf; _tb_token_=e3e38836d5bed; sgcookie=E100w%2B5heYpvWGMB796FZfaQ1JbuW9T2bwr9lYtEs5rMXbhInSCD2j7MY8O77ARl%2Fboz%2Bvfxh1wXIo1O3FD1UTA13A%3D%3D; unb=4286722913; uc3=lg2=UtASsssmOIJ0bQ%3D%3D&vt3=F8dCuwpnl0yrc4%2BJGyg%3D&nk2=F5RCYRiZq7FIMF0%3D&id2=Vy6wuiK%2BS0fkWw%3D%3D; csg=78b48096; lgc=tb743940389; cookie17=Vy6wuiK%2BS0fkWw%3D%3D; dnk=tb743940389; skt=9be306c41f340ccc; existShop=MTYxODU0MTU3OQ%3D%3D; uc4=id4=0%40VXkX0rZulCIxTBcvUj3yarFDqlej&nk4=0%40FY4Jj1fjy134mSEe8CjdnWHfThnusA%3D%3D; tracknick=tb743940389; _cc_=Vq8l%2BKCLiw%3D%3D; _l_g_=Ug%3D%3D; sg=937; _nk_=tb743940389; cookie1=U%2BJ4RwTGBOS7q2WhDFVS1zuEnBDdVCa8rH3wvCldIYk%3D; _m_h5_tk=5517bf0beb5723faa6c7189c7e7ef373_1618550580915; _m_h5_tk_enc=23d58d5205cffe9fde3f09a71de84d5f; mt=ci=4_1; uc1=cookie14=Uoe1iuWW1lLyNg%3D%3D&existShop=false&cookie16=V32FPkk%2FxXMk5UvIbNtImtMfJQ%3D%3D&cart_m=0&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=W5iHLLyFe3xm&pas=0; l=eBgznlKcjwXjEw8vBOfwnurza77tsIRAguPzaNbMiOCPO4CH5etFW6a_WaLMCnGVh6JWR3yIziz8BeYBqC22sWRdUKaOYMkmn; tfstk=ckoRB2gS7IAl1t2xQ4LcdOA2k8vGZ5OLGTNhvLL9WhgOfAIdiBiiXVaMPJ18DKC..; isg=BG1tOleM8bWal5XWuQQRMus4fAnnyqGcC5WUE69yrIRxJo3Ydxh9bAn4EPrAprlU', 'Referer': 'https://a1.alicdn.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36', 'Origin': 'https://cart.taobao.com', 'Pragma': 'no-cache', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Sec-WebSocket-Key': 'pgWIzG+W79r6suOOQTMNQQ==', 'Upgrade': 'websocket', 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits', 'Cache-Control': 'no-cache', 'Connection': 'Upgrade', 'Sec-WebSocket-Version': '13', 'if-none-match': 'W/"1efe-gibOP5kr+x7GzP3kayA6Wzcii4g"', 'if-modified-since': 'Wed, 18 Jan 2017 08:06:30 GMT', 'pragma': 'no-cache', 'content-type': 'text/plain', 'origin': 'https://cart.taobao.com', } params = ( ('spm', 'a1z02.1.1997525049.1.U6BJfg'), ('from', 'mini'), ('ad_id', ''), ('am_id', ''), ('cm_id', ''), ('pm_id', '1501036000a02c5c3739'), ) data = [ ('{}', ''), ('{}', ''), ('data', '{"pageSize":20,"endTime":1618542900510}'), ] def main(): response = requests.post('https://cart.taobao.com/cart.htm', headers=headers, params=params, data=data) print(response.text)
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/SampleSprider8_Taobao.py
SampleSprider8_Taobao.py
import requests if __name__=="__main__": url1="https://www.baidu.com" url2="https://tieba.baidu.com/f" # # 谷歌浏览器用户代理 headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'} # # iphone Safari浏览器代理 # headers1={'user-agent':'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7'} # # # r1=requests.get(url1,headers=headers1) # print(r1.status_code) # r1.encoding="UTF-8" # print(r1.text) # print(r1.headers['content-type']) # print(r1.encoding) # r2=requests.get(url2,params={"kw":"赛博朋克2077"}) # print(r2.status_code) # print(r2.text) # print(r2.url) # r2=requests.head(url1) # print(r2.text) # print(r2.status_code) # print(r2.headers['content-type']) # print(r2.encoding) # r3=requests.post(url1) # print(r3.status_code) # print(r3.text) # print(r3.encoding) # r4=requests.options(url1) # print(r4.status_code) # print(r4.text) # print(r4.url) # r = requests.get('https://api.github.com/events/404') # print(r.status_code) # print(r.raise_for_status()) # print(r.text) # print(r.json()) # print(r.content) url3="https://httpbin.org/post" url4="https://httpbin.org/get" # r=requests.post(url3,json={'key1':'value1','key2':'value2'},headers=headers) # print(r.text) # r=requests.post(url3,data={'key1':'value1','key2':'value2'}) # print(r.text) # r=requests.get(url4) # print(r.text) # print(r.headers) url5 = 'http://dp.58corp.com/data-develop/task-list' url6='https://httpbin.org/cookies' cookies=dict(Cookie="wmda_uuid=7e9f46c2f9586cebac861571355b237d; wmda_new_uuid=1; wmda_visited_projects=%3B12590408761269; DP_SESSION_ID=74b789b3-5a3a-49ab-9346-0b61e754e742; SSO_SESSION_ID=ST-3722946-3HolqHNvdxt00mEoOgNG-passport-58corp-com; wmda_session_id_12590408761269=1616485499262-abc8b58f-de86-23de") cookies1=dict(cookies_are='working') r= requests.get(url5, headers=headers) print(r.status_code) print(r.text) # s=requests.session() # r=s.get(url1,headers=headers) # print(r.text) # print(r.cookies)
zf-sampleSprider
/zf_sampleSprider-0.2.0-py3-none-any.whl/sampleSprider_old/RequestsStudty.py
RequestsStudty.py
Python SDK for the new ZhengFang educational administration system. (Supports automatic recognition and handling of slider captchas as well as regular captchas; if you find it useful, please leave a little star~⭐)

<!-- [![Build Status](https://travis-ci.org/dairoot/school-api.svg?branch=master)](https://travis-ci.org/dairoot/school-api) [![Scrutinizer Code Quality](https://scrutinizer-ci.com/g/dairoot/school-api/badges/quality-score.png?b=master)](https://scrutinizer-ci.com/g/dairoot/school-api/?branch=master) [![codecov](https://codecov.io/gh/dairoot/school-api/branch/master/graph/badge.svg)](https://codecov.io/gh/dairoot/school-api) [![pypi](https://img.shields.io/pypi/v/school-api.svg)](https://pypi.org/project/school-api/) [![Downloads](https://pepy.tech/badge/school-sdk)](https://pepy.tech/project/school-api) -->

<!-- <p align = "center"> -->
[![Downloads](https://pepy.tech/badge/school-sdk)](https://pepy.tech/project/school-sdk)
<a href = "https://www.python.org">
    <img alt = "python3" src = "https://img.shields.io/badge/language-python3-brightgreen" />
</a>
<a href = "LICENSE">
    <img alt = "license" src = "https://img.shields.io/badge/license-MIT-blue.svg" />
</a>
<a href = "https://github.com/FarmerChillax/new-school-sdk/stargazers/">
    <img alt = "stars" src = "https://badgen.net/github/stars/FarmerChillax/new-school-sdk/" />
</a>
<a href = "https://github.com/FarmerChillax/new-school-sdk/network/members/">
    <img alt = "forks" src = "https://badgen.net/github/forks/FarmerChillax/new-school-sdk/" />
</a>
<!-- </p> -->

[Online documentation](https://farmerChillax.github.io/new-school-sdk/)

## Test Environment
- Python == 3.8
- Default captcha recognition backend: CPU

## Usage

```Shell
$ pip install school-sdk
# or
$ pip install zf-school-sdk
```

```Python
from school_sdk import SchoolClient

# First instantiate a school, then instantiate a user
school = SchoolClient("172.16.254.1")

user:UserClient = school.user_login("2018xxxxx", "xxxxxxxx")
course = user.get_schedule(year=2020, term=2)
print(course)
```

See [examples](examples/) for usage samples.

## Api Function

| Api           | Description                                     | Argument          |
| :------------ | :---------------------------------------------- | :---------------- |
| user_login    | Log in                                          | account, password |
| get_schedule  | Query the class schedule                        | year, term        |
| get_score     | Query grades                                    | year, term        |
| get_info      | Get personal information                        | None              |
| refresh_info  | Refresh personal information                    | None              |
| check_session | Check the session and re-login after it expires | None              |

## School-SDK Options

| Option        | Default    | Description                                                  |
| :------------ | :--------- | :----------------------------------------------------------- |
| host          | no default | Address of the educational administration system (`required`) |
| port          | 80         | Port number                                                   |
| ssl           | False      | Whether the system is served over HTTPS                       |
| name          | None       | School name                                                   |
| exist_verify  | False      | Whether a captcha is present                                  |
| captcha_type  | captcha    | Captcha type (regular or slider)                              |
| retry         | 10         | Number of login retries                                       |
| lan_host      | None       | LAN address (not usable yet)                                  |
| lan_port      | 80         | LAN port (not usable yet)                                     |
| timeout       | 10         | Global request timeout                                        |
| url_endpoints | None       | URL endpoint configuration                                    |

## Related Projects
- New ZhengFang system: https://github.com/Farmer-chong/new-school-sdk
- Old ZhengFang system: https://github.com/dairoot/school-api
- Flask extension for this SDK: https://github.com/Farmer-chong/flask-school

<!--
| url_path_list   | `omitted`     | List of the school's endpoint paths |
| class_time_list | `omitted`     | List of class period times          |
| timeout         | 10            | Global request timeout              |
| session         | MemoryStorage | Cache backend (Redis recommended)   |
-->
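Following the Api Function table above, grade queries mirror the schedule call. A minimal sketch (it assumes the options from the School-SDK Options table are passed as keyword arguments, which this README does not show explicitly):

```Python
from school_sdk import SchoolClient

# host is required; the other options are assumed to be keyword arguments
school = SchoolClient("172.16.254.1", port=80, exist_verify=False)
user = school.user_login("2018xxxxx", "xxxxxxxx")

score = user.get_score(year=2020, term=2)
print(score)
```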
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/README.md
README.md
from .pyrng import SecureRandom
from .pyjsbn import BigInteger
from .pyb64 import Base64


class RsaKey:
    def __init__(self):
        self.n = None
        self.e = 0
        self.d = None
        self.p = None
        self.q = None
        self.dmp1 = None
        self.dmq1 = None
        self.coeff = None

    def set_public(self, n, e):
        # n and e are hex strings
        if n is not None and e is not None and len(n) > 0 and len(e) > 0:
            self.n = BigInteger(n, 16)
            self.e = int(e, 16)
        else:
            raise ValueError("Invalid RSA public key")

    def linkbrk(self, s, n):
        # Insert a newline every n characters
        # (the original used tuple indexing s[i, i + n], which raises; fixed to slices)
        ret = ''
        i = 0
        while i + n < len(s):
            ret += s[i:i + n] + '\n'
            i += n
        return ret + s[i:]

    def byte2hex(self, b):
        # b is an int in [0, 255]; zero-pad to two hex digits
        # (the original passed the int to binascii.b2a_hex, which raises)
        return '{:02x}'.format(b)

    def pkcs1pad2(self, s, n):
        # PKCS#1 v1.5 padding: 00 02 <nonzero random bytes> 00 <message>
        if n < len(s) + 11:
            print("Message too long for RSA")
            exit()
        ba = {}
        i = len(s) - 1
        # Encode the message as UTF-8 bytes, filling ba from the end
        while i >= 0 and n > 0:
            c = ord(s[i])
            i -= 1
            if c < 128:
                n -= 1
                ba[n] = c
            elif 127 < c < 2048:
                n -= 1
                ba[n] = (c & 63) | 128
                n -= 1
                ba[n] = (c >> 6) | 192
            else:
                n -= 1
                ba[n] = (c & 63) | 128
                n -= 1
                ba[n] = ((c >> 6) & 63) | 128
                n -= 1
                ba[n] = (c >> 12) | 224
        n -= 1
        ba[n] = 0
        rng = SecureRandom()
        x = {}
        # Fill the padding string with time-seeded, nonzero random bytes
        while n > 2:
            x[0] = 0
            while x[0] == 0:
                rng.rng_get_bytes(x)
            n -= 1
            ba[n] = x[0]
        n -= 1
        ba[n] = 2
        n -= 1
        ba[n] = 0
        return BigInteger(ba)

    def do_public(self, x):
        # x^e mod n
        return x.pow_int(self.e, self.n)

    def rsa_encrypt(self, text):
        m = self.pkcs1pad2(text, (self.n.bit_length() + 7) >> 3)
        if m is None:
            return None
        c = self.do_public(m)
        if c is None:
            return None
        h = c.to_string(16)
        if len(h) & 1 == 0:
            return h
        else:
            return '0' + h


if __name__ == '__main__':
    rsa = RsaKey()
    m = "AKRB6FwmOe0hE9Uo6LMKoDE5U9JU9lH1v8Uv7ATjRj2W+aTPlR9Hfm8fR782pzGwDsTD4Yr7tBHQ1cuEnGrqrJn5HuPiLqmSg4Z/AwS+Rq8eE7T+ZaGoUtpqvcoSffSJOW29RNVMwT391ona/+eK5B3RkC9WaJFYiZai7FiQDeXT"
    e = 'AQAB'
    rsa.set_public(Base64().b64tohex(m), Base64().b64tohex(e))
    rr = rsa.rsa_encrypt('1234567890')
    enpsw = Base64().hex2b64(rr)
    print(enpsw)
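do_public above is plain modular exponentiation. As a stdlib-only illustration of what it computes, independent of the BigInteger port (toy textbook-RSA numbers, far too small for real use):

    # Toy key: n = 61 * 53 = 3233, e = 17, d = 2753 (e*d ≡ 1 mod 3120)
    n, e, d = 3233, 17, 2753
    m = 65                    # "message" as an integer < n
    c = pow(m, e, n)          # the operation do_public performs: m^e mod n
    assert pow(c, d, n) == m  # the private exponent inverts it
    print(c)                  # 2790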
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/PyRsa/pyrsa.py
pyrsa.py
import binascii class Base64: def __init__(self): self.b64map = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" self.b64pad = "=" self.idx = "0123456789abcdefghijklmnopqrstuvwxyz" def hex2b64(self, h): ret = '' ii = 0 for i in range(0, len(h) - 2, 3): c = int(h[i:i+3], 16) ret += self.b64map[c >> 6] + self.b64map[c & 63] ii = i ii += 3 if ii + 1 == len(h): c = int(h[ii:ii+1], 16) ret += self.b64map[c << 2] elif ii + 2 == len(h): c = int(h[ii:ii + 2], 16) ret += self.b64map[c >> 2] + self.b64map[(c & 3) << 4] while (len(ret) & 3) > 0: ret += self.b64pad return ret def b64tohex(self, s): ret = '' k = 0 slop = 0 for i in range(len(s)): if s[i] == self.b64pad: break v = self.b64map.index(s[i]) if v < 0: continue if k == 0: ret += self.idx[v >> 2] slop = v & 3 k = 1 elif k == 1: ret += self.idx[(v >> 4) | (slop << 2)] slop = v & 0xf k = 2 elif k == 2: ret += self.idx[slop] ret += self.idx[v >> 2] slop = v & 3 k = 3 else: ret += self.idx[(slop << 2) | (v >> 4)] ret += self.idx[v & 0xf] k = 0 if k == 1: ret += self.idx[slop << 2] return ret def b64toBA(self, s): h = self.b64tohex(s) a = [] r = int(len(s) / 2) + 1 for i in range(r): a.append(binascii.b2a_hex(h[2*i:2*i+2].encode('utf-8'))) return a if __name__ == '__main__': h = "9134d5d73ad3c7e4224e47068308ea8a54f8bd9067aff8c1016c3809a652be529c03059366780c55496352eed46d632ebabedf05038f" \ "123d124baf3f2cb1cbea6ff12e1a76023b7398dab734cad33f67aab2f36a3a592776aea30bfbb151db14c618fba3df8ef595a251270" \ "858997a323ef743b83b19b89b74848a03737007e9" b = Base64() bh = b.hex2b64(h) ret = "kTTV1zrTx+QiTkcGgwjqilT4vZBnr/jBAWw4CaZSvlKcAwWTZngMVUljUu7UbWMuur7fBQOPEj0SS68/LLHL6m/xLhp2AjtzmNq3NMr" \ "TP2eqsvNqOlkndq6jC/uxUdsUxhj7o9+O9ZWiUScIWJl6Mj73Q7g7GbibdISKA3NwB+k=" print(bh == ret)
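hex2b64 above implements standard Base64 over a hex string, so for byte-aligned input it can be cross-checked against Python's stdlib (the import path follows this package's layout and is an assumption):

    import base64
    from school_sdk.PyRsa.pyb64 import Base64

    h = "48656c6c6f"  # "Hello" as hex (even length, whole bytes)
    ours = Base64().hex2b64(h)
    stdlib = base64.b64encode(bytes.fromhex(h)).decode('ascii')
    assert ours == stdlib  # both yield 'SGVsbG8='
    print(ours)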
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/PyRsa/pyb64.py
pyb64.py
import time
import random

from .tools import unsigned_right_shift


class ArcFour:
    """ARC4 stream cipher used as the PRNG backend (port of jsbn's rng.js)."""

    def __init__(self):
        self.i = 0
        self.j = 0
        self.S = {}

    def init(self, key):
        # key-scheduling algorithm (KSA)
        for i in range(256):
            self.S[i] = i
        j = 0
        for i in range(256):
            j = (j + self.S[i] + key[i % len(key)]) & 255
            self.S[i], self.S[j] = self.S[j], self.S[i]
        self.i = 0
        self.j = 0

    def next(self):
        # pseudo-random generation algorithm (PRGA)
        self.i = (self.i + 1) & 255
        self.j = (self.j + self.S[self.i]) & 255
        t = self.S[self.i]
        self.S[self.i] = self.S[self.j]
        self.S[self.j] = t
        return self.S[(t + self.S[self.i]) & 255]


class SecureRandom:
    def __init__(self):
        self.rng_state = None
        self.rng_pool = None
        self.rng_pptr = None
        self.rng_psize = 256
        if self.rng_pool is None:
            # seed the pool from random.random(), two bytes per draw
            self.rng_pool = {}
            self.rng_pptr = 0
            while self.rng_pptr < self.rng_psize:
                t = int(65536 * random.random())
                self.rng_pool[self.rng_pptr] = unsigned_right_shift(t, 8)
                self.rng_pptr += 1
                self.rng_pool[self.rng_pptr] = t & 255
                self.rng_pptr += 1
            self.rng_pptr = 0
            self.rng_seed_time()

    def rng_seed_int(self, x):
        # mix a 32-bit integer into the pool, one byte at a time
        self.rng_pool[self.rng_pptr] ^= x & 255
        self.rng_pptr += 1
        self.rng_pool[self.rng_pptr] ^= (x >> 8) & 255
        self.rng_pptr += 1
        self.rng_pool[self.rng_pptr] ^= (x >> 16) & 255   # was (x >> 8) a second time
        self.rng_pptr += 1
        self.rng_pool[self.rng_pptr] ^= (x >> 24) & 255
        self.rng_pptr += 1                                # was missing, per jsbn rng_seed_int
        if self.rng_pptr >= self.rng_psize:
            self.rng_pptr -= self.rng_psize

    def rng_seed_time(self):
        self.rng_seed_int(int(time.time() * 1000))

    def rng_get_byte(self):
        if self.rng_state is None:
            self.rng_seed_time()
            self.rng_state = ArcFour()
            self.rng_state.init(self.rng_pool)
            # wipe the seed pool once the cipher is keyed
            for i in range(len(self.rng_pool)):   # was indexing with the unchanged rng_pptr
                self.rng_pool[i] = 0
            self.rng_pptr = 0
        return self.rng_state.next()

    def rng_get_bytes(self, ba):
        for i in range(len(ba)):
            ba[i] = self.rng_get_byte()
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/PyRsa/pyrng.py
pyrng.py
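A minimal usage sketch for pyrng.py above (not part of the package): `rng_get_bytes()` fills any integer-indexed buffer of length n, which is exactly how `pkcs1pad2()` above draws its padding bytes.

```{python}
from school_sdk.PyRsa.pyrng import SecureRandom

rng = SecureRandom()
buf = {i: 0 for i in range(16)}   # the pool API indexes dict slots 0..n-1
rng.rng_get_bytes(buf)
print([buf[i] for i in range(16)])   # 16 bytes from the time-seeded ARC4 stream
```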
import math

from .tools import unsigned_right_shift


class Classic:
    """Plain modular reduction (used for small exponents or even moduli)."""

    def __init__(self, m):
        self.m = m

    def convert(self, x):
        if x.int_dict['s'] < 0 or x.compare2(self.m) >= 0:
            return x.mod(self.m)
        return x

    def revert(self, x):
        return x

    def reduce(self, x):
        x.rem2(self.m, None, x)

    def mul2(self, x, y, r):
        x.multiply2(y, r)
        self.reduce(r)

    def sqr2(self, x, r):
        x.square2(r)
        self.reduce(r)


class Montgomery:
    """Montgomery reduction (port of jsbn's Montgomery class)."""

    def __init__(self, m):
        self.m = m
        self.mp = m.inv_digit()
        self.mpl = self.mp & 0x7fff
        self.mph = self.mp >> 15
        self.um = (1 << (m.DB - 15)) - 1
        self.mt2 = 2 * m.int_dict['t']

    def convert(self, x):
        # xR mod m
        r = BigInteger(None)
        x.abs().dl_shift2(self.m.int_dict['t'], r)
        r.rem2(self.m, None, r)
        if x.int_dict['s'] < 0 and r.compare2(ZERO) > 0:
            self.m.sub2(r, r)
        return r

    def reduce(self, x):
        # x = x / R mod m (HAC 14.32)
        while x.int_dict['t'] <= self.mt2:
            # pad x so am() has enough room later
            x.int_dict[x.int_dict['t']] = 0
            x.int_dict['t'] += 1
        for i in range(self.m.int_dict['t']):
            # faster way of calculating u0 = x[i] * mp mod DV
            j = x.int_dict[i] & 0x7fff
            u0 = (j * self.mpl + (((j * self.mph + (x.int_dict[i] >> 15) * self.mpl) & self.um) << 15)) & x.DM
            # use am() to combine the multiply-shift-add into one call
            j = i + self.m.int_dict['t']
            x.int_dict[j] += self.m.am(0, u0, x, i, 0, self.m.int_dict['t'])
            # propagate the carry
            while x.int_dict[j] >= x.DV:
                x.int_dict[j] -= x.DV
                j += 1
                x.int_dict[j] += 1
        x.clamp()
        x.dr_shift2(self.m.int_dict['t'], x)
        # after dr_shift2() the intermediate matches the browser implementation
        if x.compare2(self.m) >= 0:
            x.sub2(self.m, x)

    def sqr2(self, x, r):
        x.square2(r)
        self.reduce(r)

    def revert(self, x):
        # x / R mod m
        r = BigInteger(None)
        x.copy2(r)
        self.reduce(r)
        return r

    def mul2(self, x, y, r):
        x.multiply2(y, r)
        self.reduce(r)


class BigInteger:
    """Arbitrary-precision integer, ported from Tom Wu's jsbn library.

    Digits live in ``int_dict`` (28 bits per digit); ``'t'`` is the digit
    count and ``'s'`` the sign extension (0 or -1).
    """

    def __init__(self, a, b=None, c=None):
        self.int_dict = dict({i: None for i in range(37)}, **{'s': 0, 't': 0})
        self.BI_RM = "0123456789abcdefghijklmnopqrstuvwxyz"
        self.BI_RC = self.bi_rc()
        self.DB = 28
        self.DM = 268435455         # (1 << DB) - 1
        self.DV = 268435456         # 1 << DB
        self.F1 = 24
        self.F2 = 4
        self.FV = 4503599627370496  # 2 ** 52
        if a is not None:
            if b is None and type(a) != str:
                self.from_string(a, 256)
            else:
                self.from_string(a, b)
        else:
            self.int_dict = {'s': 0, 't': 0}

    def __getitem__(self, item):
        return self.int_dict

    def int2char(self, n):
        return self.BI_RM[n]

    def am1(self, i, x, w, j, c, n):
        # 26-bit digit multiply-accumulate (unused here; am() is the 28-bit one)
        n -= 1
        while n >= 0:
            v = x * self.int_dict[i] + w.int_dict[j] + c
            i += 1
            c = v // 0x4000000   # was int(v / 0x4000000): float division loses precision
            w.int_dict[j] = v & 0x3ffffff
            j += 1
            n -= 1
        return c

    def am2(self, i, x, w, j, c, n):
        # 30-bit variant (unused here)
        xl = x & 0x7fff
        xh = x >> 15
        n -= 1
        while n >= 0:
            l = self.int_dict[i] & 0x7fff
            h = self.int_dict[i] >> 15
            i += 1
            m = xh * l + h * xl
            l = xl * l + ((m & 0x7fff) << 15) + w.int_dict[j] + (c & 0x3fffffff)
            c = unsigned_right_shift(l, 30) + unsigned_right_shift(m, 15) + xh * h + unsigned_right_shift(c, 30)
            w.int_dict[j] = l & 0x3fffffff   # was w[j], which bypassed int_dict
            j += 1
            n -= 1
        return c

    def am(self, i, x, w, j, c, n):
        # 28-bit digit multiply-accumulate: w[j..j+n-1] += this[i..] * x
        xl = x & 0x3fff
        xh = x >> 14
        for k in range(n - 1, -1, -1):
            ll = self.int_dict[i] & 0x3fff
            h = self.int_dict[i] >> 14
            i += 1
            m = xh * ll + h * xl
            ll = xl * ll + ((m & 0x3fff) << 14) + w.int_dict[j] + c
            c = (ll >> 28) + (m >> 14) + xh * h
            w.int_dict[j] = ll & 0xfffffff
            j += 1
        return c

    def nbv(self, i):
        # return a BigInteger initialised to the int value i
        r = BigInteger(None)
        r.from_int(i)
        return r

    def bi_rc(self):
        # char-code -> digit-value table used when parsing strings
        birc = {}
        rr = ord('0')
        for vv in range(10):
            birc[rr] = vv
            rr += 1
        rr = ord('a')
        for vv in range(10, 36):
            birc[rr] = vv
            rr += 1
        rr = ord('A')
        for vv in range(10, 36):
            birc[rr] = vv
            rr += 1
        return birc

    def from_int(self, x):
        self.int_dict['t'] = 1
        self.int_dict['s'] = -1 if x < 0 else 0
        if x > 0:
            self.int_dict[0] = x
        elif x < -1:
            self.int_dict[0] = x + self.DV
        else:
            self.int_dict['t'] = 0

    def from_string(self, s, b):
        # parse s (hex string or 0..n-1 indexed byte dict) with radix 2**k
        k = int(math.log(b, 2))
        i = len(s)
        mi = False
        sh = 0
        i -= 1
        while i >= 0:   # was `i > 0`, which silently dropped the leading element
            x = s[i] & 0xff if k == 8 else self.intat(s, i)
            if x < 0:
                if s[i] == '-':
                    mi = True
                i -= 1   # keep the walk moving before continue
                continue
            mi = False
            if sh == 0:
                self.int_dict[self.int_dict['t']] = x
                self.int_dict['t'] += 1
            elif sh + k > self.DB:
                self.int_dict[self.int_dict['t'] - 1] |= (x & ((1 << (self.DB - sh)) - 1)) << sh
                self.int_dict[self.int_dict['t']] = (x >> (self.DB - sh))
                self.int_dict['t'] += 1
            else:
                self.int_dict[self.int_dict['t'] - 1] |= x << sh
            sh += k
            if sh >= self.DB:
                sh -= self.DB
            i -= 1
        if k == 8 and (s[0] & 0x80) != 0:
            self.int_dict['s'] = -1
            if sh > 0:
                self.int_dict[self.int_dict['t'] - 1] |= ((1 << (self.DB - sh)) - 1) << sh
        self.clamp()
        if mi:
            ZERO.sub2(self, self)   # negate; was self.sub2(self, self), which zeroes

    def intat(self, s, i):
        # digit value of character s[i], or -1 if it is not a digit
        try:
            return self.BI_RC[ord(s[i])]
        except KeyError:
            return -1

    def clamp(self):
        # strip leading digits that only carry the sign extension
        c = self.int_dict['s'] & self.DM
        while self.int_dict['t'] > 0 and self.int_dict[self.int_dict['t'] - 1] == c:
            self.int_dict['t'] -= 1

    def to_string(self, b):
        if self.int_dict['s'] < 0:
            return '-' + self.negate().to_string(b)
        k = int(math.log(b, 2))
        km = (1 << k) - 1
        m = False
        r = ''
        i = self.int_dict['t']
        p = self.DB - (i * self.DB) % k
        if i > 0:
            i -= 1
            d = self.int_dict[i] >> p
            if p < self.DB and d > 0:
                m = True
                r = self.int2char(d)
            while i >= 0:
                if p < k:
                    d = (self.int_dict[i] & ((1 << p) - 1)) << (k - p)
                    i -= 1
                    p += self.DB - k
                    d |= self.int_dict[i] >> p
                else:
                    p -= k
                    d = (self.int_dict[i] >> p) & km
                    if p <= 0:
                        p += self.DB
                        i -= 1
                if d > 0:
                    m = True
                if m:
                    r += self.int2char(d)
        return r if m else '0'

    def sub2(self, a, r):
        # r = self - a (jsbn subTo)
        i = 0
        c = 0
        m = min(a.int_dict['t'], self.int_dict['t'])
        while i < m:
            c += self.int_dict[i] - a.int_dict[i]
            r.int_dict[i] = c & self.DM
            i += 1
            c >>= self.DB
        if a.int_dict['t'] < self.int_dict['t']:
            c -= a.int_dict['s']
            while i < self.int_dict['t']:
                c += self.int_dict[i]
                r.int_dict[i] = c & self.DM
                i += 1
                c >>= self.DB
            c += self.int_dict['s']
        else:
            c += self.int_dict['s']
            while i < a.int_dict['t']:
                c -= a.int_dict[i]
                r.int_dict[i] = c & self.DM
                i += 1
                c >>= self.DB
            c -= a.int_dict['s']
        r.int_dict['s'] = -1 if c < 0 else 0
        if c < -1:
            r.int_dict[i] = self.DV + c   # was self.DV alone, dropping the borrow
            i += 1
        elif c > 0:
            r.int_dict[i] = c
            i += 1
        r.int_dict['t'] = i
        r.clamp()

    def copy2(self, r):
        for i in range(self.int_dict['t'] - 1, -1, -1):
            r.int_dict[i] = self.int_dict[i]
        r.int_dict['t'] = self.int_dict['t']
        r.int_dict['s'] = self.int_dict['s']

    def nbits(self, x):
        # number of significant bits in x
        r = 1
        t = unsigned_right_shift(x, 16)
        if t != 0:
            x = t
            r += 16
        t = x >> 8
        if t != 0:
            x = t
            r += 8
        t = x >> 4
        if t != 0:
            x = t
            r += 4
        t = x >> 2
        if t != 0:
            x = t
            r += 2
        t = x >> 1
        if t != 0:
            x = t
            r += 1
        return r

    def negate(self):
        r = BigInteger(None)
        ZERO.sub2(self, r)
        return r

    def abs(self):
        return self.negate() if self.int_dict['s'] < 0 else self

    def compare2(self, a):
        r = self.int_dict['s'] - a.int_dict['s']
        if r != 0:
            return r
        i = self.int_dict['t']
        r = i - a.int_dict['t']
        if r != 0:
            return -r if self.int_dict['s'] < 0 else r
        for k in range(i - 1, -1, -1):
            r = self.int_dict[k] - a.int_dict[k]
            if r != 0:
                return r
        return 0

    def bit_length(self):
        if self.int_dict['t'] <= 0:
            return 0
        return self.DB * (self.int_dict['t'] - 1) + self.nbits(
            self.int_dict[self.int_dict['t'] - 1] ^ (self.int_dict['s'] & self.DM)
        )

    def dl_shift2(self, n, r):
        # r = self << (n digits)
        for i in range(self.int_dict['t'] - 1, -1, -1):
            r.int_dict[i + n] = self.int_dict[i]
        for i in range(n - 1, -1, -1):
            r.int_dict[i] = 0
        r.int_dict['t'] = self.int_dict['t'] + n
        r.int_dict['s'] = self.int_dict['s']

    def l_shift2(self, n, r):
        # r = self << n (bits)
        bs = n % self.DB
        cbs = self.DB - bs
        bm = (1 << cbs) - 1
        ds = n // self.DB
        c = (self.int_dict['s'] << bs) & self.DM
        for i in range(self.int_dict['t'] - 1, -1, -1):
            r.int_dict[i + ds + 1] = (self.int_dict[i] >> cbs) | c
            c = (self.int_dict[i] & bm) << bs
        for i in range(ds - 1, -1, -1):
            r.int_dict[i] = 0
        r.int_dict[ds] = c
        r.int_dict['t'] = self.int_dict['t'] + ds + 1
        r.int_dict['s'] = self.int_dict['s']
        r.clamp()

    def dr_shift2(self, n, r):
        # r = self >> (n digits)
        for i in range(n, self.int_dict['t']):
            r.int_dict[i - n] = self.int_dict[i]
        r.int_dict['t'] = max(self.int_dict['t'] - n, 0)
        r.int_dict['s'] = self.int_dict['s']

    def r_shift2(self, n, r):
        # r = self >> n (bits)
        r.int_dict['s'] = self.int_dict['s']
        ds = n // self.DB
        if ds >= self.int_dict['t']:
            r.int_dict['t'] = 0
            return
        bs = n % self.DB
        cbs = self.DB - bs
        bm = (1 << bs) - 1
        r.int_dict[0] = self.int_dict[ds] >> bs
        for i in range(ds + 1, self.int_dict['t']):
            r.int_dict[i - ds - 1] |= (self.int_dict[i] & bm) << cbs
            r.int_dict[i - ds] = self.int_dict[i] >> bs
        if bs > 0:
            r.int_dict[self.int_dict['t'] - ds - 1] |= (self.int_dict['s'] & bm) << cbs
        r.int_dict['t'] = self.int_dict['t'] - ds
        r.clamp()

    def multiply2(self, a, r):
        # r = self * a (jsbn multiplyTo)
        x = self.abs()
        y = a.abs()
        i = x.int_dict['t']
        r.int_dict['t'] = i + y.int_dict['t']
        i -= 1
        while i >= 0:
            r.int_dict[i] = 0
            i -= 1
        for i in range(y.int_dict['t']):
            r.int_dict[i + x.int_dict['t']] = x.am(0, y.int_dict[i], r, i, 0, x.int_dict['t'])
        r.int_dict['s'] = 0
        r.clamp()
        if self.int_dict['s'] != a.int_dict['s']:
            ZERO.sub2(r, r)

    def square2(self, r):
        # r = self ** 2 (jsbn squareTo)
        x = self.abs()
        i = r.int_dict['t'] = 2 * x.int_dict['t']
        for k in range(i - 1, -1, -1):
            r.int_dict[k] = 0
        for i in range(x.int_dict['t'] - 1):
            c = x.am(i, x.int_dict[i], r, 2 * i, 0, 1)
            r.int_dict[i + x.int_dict['t']] += x.am(i + 1, 2 * x.int_dict[i], r, 2 * i + 1, c,
                                                    x.int_dict['t'] - i - 1)
            if r.int_dict[i + x.int_dict['t']] >= x.DV:
                r.int_dict[i + x.int_dict['t']] -= x.DV
                r.int_dict[i + x.int_dict['t'] + 1] = 1
        ii = max(x.int_dict['t'] - 1, 0)   # last digit index (was off by one when t == 1)
        if r.int_dict['t'] > 0:
            r.int_dict[r.int_dict['t'] - 1] += x.am(ii, x.int_dict[ii], r, 2 * ii, 0, 1)
        r.int_dict['s'] = 0
        r.clamp()

    def rem2(self, m, q=None, r=None):
        # division with remainder: q = self // m, r = self % m (jsbn divRemTo)
        pm = m.abs()
        if pm.int_dict['t'] <= 0:
            return
        pt = self.abs()
        if pt.int_dict['t'] < pm.int_dict['t']:
            if q is not None:
                q.from_int(0)
            if r is not None:
                self.copy2(r)
            return
        if r is None:
            r = BigInteger(None)
        y = BigInteger(None)
        ts = self.int_dict['s']
        ms = m.int_dict['s']
        nsh = self.DB - self.nbits(pm.int_dict[pm.int_dict['t'] - 1])   # normalise the divisor
        if nsh > 0:
            pm.l_shift2(nsh, y)
            pt.l_shift2(nsh, r)
        else:
            pm.copy2(y)
            pt.copy2(r)
        ys = y.int_dict['t']
        y0 = y.int_dict[ys - 1]
        if y0 == 0:
            return
        yt = y0 * (1 << self.F1) + (y.int_dict[ys - 2] >> self.F2 if ys > 1 else 0)
        d1 = self.FV / yt
        d2 = (1 << self.F1) / yt
        e = 1 << self.F2
        i = r.int_dict['t']
        j = i - ys
        t = BigInteger(None) if q is None else q
        y.dl_shift2(j, t)
        if r.compare2(t) >= 0:
            r.int_dict[r.int_dict['t']] = 1
            r.int_dict['t'] += 1
            r.sub2(t, r)
        ONE.dl_shift2(ys, t)
        t.sub2(y, y)   # "negative" y so we can replace sub with am later
        while y.int_dict['t'] < ys:
            y.int_dict[y.int_dict['t']] = 0
            y.int_dict['t'] += 1
        for k in range(j - 1, -1, -1):
            # estimate the quotient digit
            i -= 1
            qd = self.DM if r.int_dict[i] == y0 else \
                int(r.int_dict[i] * d1 + (r.int_dict[i - 1] + e) * d2)
            r.int_dict[i] += y.am(0, qd, r, k, 0, ys)
            if r.int_dict[i] < qd:
                # the estimate was too big; correct it
                y.dl_shift2(k, t)
                r.sub2(t, r)
                qd -= 1
                while r.int_dict[i] < qd:
                    r.sub2(t, r)
                    qd -= 1
        if q is not None:
            r.dr_shift2(ys, q)
            if ts != ms:
                ZERO.sub2(q, q)
        r.int_dict['t'] = ys
        r.clamp()
        if nsh > 0:
            r.r_shift2(nsh, r)   # denormalise the remainder
        if ts < 0:
            ZERO.sub2(r, r)

    def pow_int(self, e, m):
        # self ** e mod m (jsbn modPowInt)
        if e < 256 or m.is_even():
            z = Classic(m)
        else:
            z = Montgomery(m)
        return self.exp(e, z)

    def is_even(self):
        return (self.int_dict[0] & 1 if self.int_dict['t'] > 0 else self.int_dict['s']) == 0

    def inv_digit(self):
        # -1/self[0] mod 2**DB via Newton iteration (jsbn invDigit)
        if self.int_dict['t'] < 1:
            return 0
        x = self.int_dict[0]
        if (x & 1) == 0:
            return 0
        y = x & 3                                                # y == 1/x mod 2**2
        y = (y * (2 - (x & 0xf) * y)) & 0xf                      # y == 1/x mod 2**4
        y = (y * (2 - (x & 0xff) * y)) & 0xff                    # y == 1/x mod 2**8
        y = (y * (2 - (((x & 0xffff) * y) & 0xffff))) & 0xffff   # y == 1/x mod 2**16
        y = (y * (2 - x * y % self.DV)) % self.DV                # y == 1/x mod 2**DB
        return self.DV - y if y > 0 else -y

    def exp(self, e, z):
        # self ** e using reduction algorithm z (simple square-and-multiply)
        if e > 0xffffffff or e < 1:
            return ONE
        r = BigInteger(None)
        r2 = BigInteger(None)
        g = z.convert(self)
        i = self.nbits(e) - 1
        g.copy2(r)   # values match the browser implementation up to copy2()
        for k in range(i - 1, -1, -1):
            z.sqr2(r, r2)
            if e & (1 << k) > 0:
                z.mul2(r2, g, r)
            else:
                r, r2 = r2, r
        return z.revert(r)

    def mod(self, a):
        r = BigInteger(None)
        self.abs().rem2(a, None, r)
        if self.int_dict['s'] < 0 and r.compare2(ZERO) > 0:
            a.sub2(r, r)
        return r


ZERO = BigInteger(None).nbv(0)
ONE = BigInteger(None).nbv(1)
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/PyRsa/pyjsbn.py
pyjsbn.py
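A minimal sketch of the jsbn-style API in pyjsbn.py above (the operands are made-up hex values, not real key material): parse hex big integers and do modular exponentiation, which is exactly what `RsaKey.do_public()` relies on.

```{python}
from school_sdk.PyRsa.pyjsbn import BigInteger

m = BigInteger('1234567890abcdef', 16)                   # message block
n = BigInteger('c0ffee0123456789abcdef0123456789', 16)   # odd modulus -> Montgomery path
c = m.pow_int(65537, n)                                  # m ** e mod n, e = 0x10001
print(c.to_string(16))
```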
import time
from typing import Dict, Union

import requests

from school_sdk.client.api.class_schedule import ScheduleClass
from school_sdk.client.api.score import Score
from school_sdk.client.api.user_info import Info
from school_sdk.client.utils import user_is_login
from school_sdk.config import URL_ENDPOINT
from school_sdk.client.api.schedules import Schedule
from school_sdk.client.exceptions import LoginException
from school_sdk.client.api.login import ZFLogin
from school_sdk.client.base import BaseUserClient


class SchoolClient:

    def __init__(self, host, port: int = 80, ssl: bool = False, name=None,
                 exist_verify: bool = False, captcha_type: str = "captcha",
                 retry: int = 10, lan_host=None, lan_port=80, timeout=10,
                 login_url_path=None, url_endpoints=None) -> None:
        """Initialise the school configuration.

        Args:
            host (str): host address
            port (int, optional): port. Defaults to 80.
            ssl (bool, optional): whether to use HTTPS. Defaults to False.
            name (str, optional): school name. Defaults to None.
            exist_verify (bool, optional): whether a captcha is used. Defaults to False.
            captcha_type (str, optional): captcha type, matching the URL used by the
                system: values starting with "cap" mean a slider captcha, values
                starting with "kap" an image captcha. Defaults to "captcha" (slider).
            retry (int, optional): login retry count. Defaults to 10.
            lan_host (str, optional): LAN host address. Defaults to None.
            lan_port (int, optional): LAN port. Defaults to 80.
            timeout (int, optional): request timeout in seconds. Defaults to 10.
            login_url_path (str, optional): login path. Defaults to None.
            url_endpoints (dict, optional): endpoint table. Defaults to None.
        """
        school = {
            "name": name,
            "exist_verify": exist_verify,
            "captcha_type": captcha_type,
            "retry": retry,
            "lan_host": lan_host,
            "lan_port": lan_port,
            "timeout": timeout,
            "login_url_path": login_url_path,
            "url_endpoints": url_endpoints or URL_ENDPOINT
        }
        self.base_url = f'https://{host}:{port}' if ssl else f'http://{host}:{port}'
        self.config: dict = school

    def user_login(self, account: str, password: str, **kwargs):
        """Log a user in.

        Args:
            account (str): user account
            password (str): user password
        """
        user = UserClient(self, account=account, password=password, **kwargs)
        return user.login()

    def init_dev_user(self, cookies: str = None):
        dev_user = UserClient(self, account="dev account", password="dev password")
        return dev_user.get_dev_user(cookies)


class UserClient(BaseUserClient):
    schedule: Schedule = None
    score: Score = None
    info = None
    schedule_class: ScheduleClass = None

    def __init__(self, school: SchoolClient, account, password, **kwargs) -> None:
        """Initialise a user bound to a school instance.

        Args:
            school (SchoolClient): school instance
            account (str): account
            password (str): password
        """
        self.BASE_URL = school.base_url
        self.account = account
        self.password = password
        self.school: SchoolClient = school
        self._csrf = None
        self.t = int(time.time() * 1000)
        self._image = None

    def login(self):
        """Log in; called through SchoolClient."""
        user = ZFLogin(user_client=self)
        user.get_login()
        self._http = user._http
        return self

    def init_schedule(self):
        if self.schedule is None:
            self.schedule = Schedule(self)

    def set_schedule_time(self, schedule_time: dict):
        self.schedule.schedule_parse.set_schedule_time(schedule_time=schedule_time)

    def get_schedule(self, year: int, term: int = 1, schedule_time: dict = None, **kwargs):
        """Fetch the class schedule."""
        kwargs.setdefault("year", year)
        kwargs.setdefault("term", term)
        if self.schedule is None:
            self.schedule = Schedule(self, schedule_time)
        return self.schedule.get_schedule_dict(**kwargs)

    def get_class_schedule(self, year: int, term: int = 1, **kwargs):
        self.schedule_class = ScheduleClass(self)
        self.schedule_class._get_raw(year=year, term=term, **kwargs)   # was hard-coded to 2021/1
        return "dev"

    def get_score(self, year: int, term: int = 1, **kwargs):
        """Fetch scores."""
        kwargs.setdefault("year", year)
        kwargs.setdefault("term", term)
        if self.score is None:
            self.score = Score(self)
        return self.score.get_score(**kwargs)

    def get_info(self, **kwargs):
        """Fetch personal information."""
        if self.info is None:
            self.info = Info(self)
        return self.info.get_info(**kwargs)

    def refresh_info(self, **kwargs):
        self.info = None
        return self.get_info(**kwargs)

    def check_session(self) -> bool:
        url = self.school.config.get("url_endpoints")["INDEX_URL"]
        resp = self.get(url)
        try:
            if not user_is_login(self.account, resp.text):
                # session expired: log in again
                new_user = ZFLogin(user_client=self)
                new_user.get_login()
                self._http = new_user._http
        except LoginException as le:
            raise LoginException(
                400, f"Re-login failed: the session of account {self.account} has expired "
                     f"and logging in again did not succeed: {str(le)}")
        return True

    # dev options
    def get_cookies(self):
        return self._http.cookies

    def set_cookies(self, cookies: str, **kwargs):
        """Set user cookies.

        Args:
            cookies (str): a "key=value" cookie string
        """
        cookies = cookies.strip()
        key, value = cookies.split('=', 1)   # split once: cookie values may contain '='
        self._http.cookies.set(key, value)

    def get_dev_user(self, cookies: str, **kwargs):
        self._http = requests.Session()
        self.set_cookies(cookies=cookies, **kwargs)
        return self

    def __repr__(self) -> str:
        return f'<school_sdk.client.UserClient account: {self.account}>'
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/client/__init__.py
__init__.py
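A minimal usage sketch for the client module above (hypothetical host and credentials): configure the school, log a user in, then pull the parsed schedule and scores.

```{python}
from school_sdk.client import SchoolClient

school = SchoolClient(host='jwgl.example.edu', exist_verify=True, captcha_type='kaptcha')
user = school.user_login('2018133209', 'password')   # returns a logged-in UserClient
print(user.get_schedule(year=2021, term=1))
print(user.get_score(year=2021, term=1))
```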
from school_sdk.check_code.type import kaptcha_func, captcha_func
from school_sdk.client.exceptions import LoginException, RTKException
from school_sdk.check_code import ZFCaptchaDistinguish
from school_sdk.client.api import BaseCrawler
from school_sdk.PyRsa.pyb64 import Base64
from school_sdk.PyRsa import RsaKey
from pyquery import PyQuery as pq
import time
import re
import json
import base64


class ZFLogin(BaseCrawler):
    LOGIN_EXTEND = b'{"appName":"Netscape","userAgent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36","appVersion":"5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"}'

    def get_login(self, **kwargs):
        """Public login entry point.

        1. Fetch the CSRF token and the initial cookie.
        2. Fetch the RSA public key.
        3. Run captcha verification if the school configuration requires it.
        4. Send the login request.

        Raises:
            LoginException: on login failure
        """
        self.get_raw_csrf_and_cookie()
        self.get_rsa_publick_key()
        if self.school.config['exist_verify']:
            if self.captcha_type.startswith("cap"):
                # slider captcha
                for _ in range(self.retry):
                    if self.verification_captcha():
                        break
                if not self._post_login():
                    raise LoginException("xxx", "slider-captcha login failed")
                return True
            if self.captcha_type.startswith('kap'):
                # image-recognition captcha
                for i in range(self.retry):
                    verify_code = self.verification_kaptcha()
                    is_login = self._kaptcha_login(verify_code=verify_code)
                    if is_login:
                        return is_login
                raise LoginException("xxx", "captcha login failed")
        else:
            # login without a captcha
            if self._post_login():
                return True
            raise LoginException("xxx", "login failed")

    def __init__(self, user_client) -> None:
        super().__init__(user_client)
        self.password = self.user_client.password
        self._csrf = None
        self._b64 = Base64()
        self._image = None
        self.path = self.school.config["url_endpoints"]['LOGIN']
        self.captcha_type: str = self.school.config['captcha_type']
        self.retry: int = self.school.config["retry"]

    def get_raw_csrf_and_cookie(self):
        """Fetch the CSRF token."""
        url = self.path['INDEX']
        res = self.get(url)
        doc = pq(res.text)
        csrf = doc("#csrftoken").attr("value")
        self._csrf = csrf

    def get_rsa_publick_key(self):
        """Fetch the RSA public key.

        Returns:
            tuple: RSA modulus and exponent
        """
        url = self.path["PUBLIC_KEY"]
        params = {"time": self.t, "_": self.t}
        headers = self.generate_headers()
        res = self.get(url=url, params=params, headers=headers)
        result_json = res.json()
        return result_json.get("modulus"), result_json.get('exponent')

    def verification_captcha(self) -> bool:
        """Slider-captcha verification.

        1. Fetch the captcha image.
        2. Work out the slide offset.
        3. Send the verification request.
        """
        rtk = self._get_rtk()
        self._image = self._get_captcha_image()
        cap = ZFCaptchaDistinguish(self._image, captcha_func)
        x, y = cap.verify()
        track = self._get_track(x, y)
        captcha_verify_result = json.dumps(track).encode('utf-8')
        url = self.path["CAPTCHA"]
        data = {
            "instanceId": "zfcaptchaLogin",
            "rtk": rtk,
            "time": int(time.time() * 1000),
            "mt": base64.b64encode(captcha_verify_result),
            "extend": base64.b64encode(self.LOGIN_EXTEND),
            "type": "verify"
        }
        res = self.post(url=url, data=data)
        if res.status_code == 200 and res.json().get("status") == "success":
            return True
        return False

    def verification_kaptcha(self) -> str:
        """Recognise the image captcha."""
        self._image = self._get_kaptcha()
        cap = ZFCaptchaDistinguish(self._image, kaptcha_func)
        return cap.verify()

    def _get_kaptcha(self) -> bytes:
        params = {"time": self.t}
        url = self.path['KCAPTCHA']
        res = self.get(url, params=params)
        if res.status_code == 200:
            return res.content

    def _kaptcha_login(self, verify_code: str) -> bool:
        """Send the login request (image-captcha variant).

        Returns:
            bool: whether the login succeeded
        """
        rsa_key = RsaKey()
        m, e = self.get_rsa_publick_key()
        rsa_key.set_public(self._b64.b64tohex(m), self._b64.b64tohex(e))
        rr = rsa_key.rsa_encrypt(self.password)
        data = {
            'csrftoken': self._csrf,
            'language': 'zh_CN',
            'yhm': self.account,
            'mm': self._b64.hex2b64(rr),
            'yzm': verify_code
        }
        params = {"time": self.t}
        url = self.path['INDEX']
        res = self.post(url, params=params, data=data)
        return self._is_login(res.text)

    def _post_login(self) -> bool:
        """Send the login request.

        Returns:
            bool: whether the login succeeded
        """
        rsa_key = RsaKey()
        m, e = self.get_rsa_publick_key()
        rsa_key.set_public(self._b64.b64tohex(m), self._b64.b64tohex(e))
        rr = rsa_key.rsa_encrypt(self.password)
        data = {
            'csrftoken': self._csrf,
            'yhm': self.account,
            'mm': self._b64.hex2b64(rr)
        }
        params = {"time": self.t}
        url = self.path['INDEX']
        res = self.post(url, params=params, data=data)
        return self._is_login(res.text)

    def _is_login(self, html) -> bool:
        """Check whether the returned page belongs to a logged-in user.

        Args:
            html (str): HTML string

        Returns:
            bool: whether the account appears in the HTML
        """
        re_str = f'value="{self.account}"'
        result = re.search(re_str, html)
        if result:
            return True
        # error path: inspect the tip message
        doc = pq(html)
        err_msg = doc('#tips').text()
        if '验证码' in err_msg:   # "captcha" error from the server: retryable
            return False
        raise LoginException(400, err_msg)

    def _get_captcha_image(self):
        """Download the slider captcha.

        1. Fetch rtk, si and imtk.
        2. Download the image.
        """
        params = {
            "type": "refresh",
            "time": self.t,   # was {self.t}, which builds a set instead of the value
            "instanceId": "zfcaptchaLogin"
        }
        url = self.path["CAPTCHA"]
        res = self.get(url, params=params)
        captcha_data = res.json()
        params.update({
            "type": "image",
            "imtk": captcha_data.get("imtk"),
            "id": captcha_data.get("si")
        })
        url = self.path["CAPTCHA"]
        res = self.get(url=url, params=params)
        if res.status_code == 200:
            return res.content

    def _get_rtk(self) -> str:
        """Extract the rtk token from the captcha JavaScript file."""
        url = self.path['CAPTCHA']
        params = {
            "type": "resource",
            "instanceId": "zfcaptchaLogin",
            "name": "zfdun_captcha.js"
        }
        res = self.get(url, params=params)
        result = re.search("tk:'(.*)',", res.text)
        try:
            return result.group(1)
        except AttributeError:
            raise RTKException("failed to parse rtk")

    def _get_track(self, distance, y) -> list:
        """Simulate a human slide.

        Uses a fast-then-slow acceleration profile to mimic a human drag.

        Args:
            distance (int): distance to move
            y (int): y coordinate of the slider

        Returns:
            list: track points
        """
        start = 1200
        current = 0
        track = []
        # deceleration threshold
        mid = distance * 4 / 5
        # time step
        t = 0.2
        # initial velocity
        v = 0
        while current < distance:
            # accelerate at a = 2, then decelerate at a = -3
            a = 2 if current < mid else -3
            v0 = v
            # current velocity: v = v0 + a*t
            v = v0 + a * t
            # displacement: x = v0*t + a*t^2/2
            move = v0 * t + 1 / 2 * a * t * t
            current += move
            track.append({"x": start + int(current), "y": y, "t": int(time.time() * 1000)})
            time.sleep(0.01)
        return track
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/client/api/login.py
login.py
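A standalone sketch of the slide-track kinematics used by `_get_track()` above (timestamps and the y coordinate omitted): accelerate at a = 2 for the first 4/5 of the distance, then decelerate at a = -3, sampling x = v0·t + a·t²/2 per tick.

```{python}
def make_track(distance, t=0.2):
    current, v, points = 0.0, 0.0, []
    mid = distance * 4 / 5              # switch point between the two phases
    while current < distance:
        a = 2 if current < mid else -3  # fast first, slow at the end
        v0, v = v, v + a * t            # v = v0 + a*t
        current += v0 * t + a * t * t / 2
        points.append(int(current))
    return points

print(make_track(120))                  # offsets grow quickly, then taper off
```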
import re


class ScheduleParse:
    # default start times (hour, minute) of each class period
    __SCHEDULE_TIME = {
        "1": [8, 30], "2": [9, 20], "3": [10, 25], "4": [11, 15],
        "5": [14, 40], "6": [15, 30], "7": [16, 30], "8": [17, 20],
        "9": [19, 30], "10": [20, 20]
    }

    def __init__(self, content=None, schedule_time: dict = None) -> None:
        self.raw = content
        self.parse_list: list = []
        self.parse_dict: dict = {}
        self.parse_ics = None
        # always fall back to the defaults; the original only set this when
        # schedule_time was given, leaving the attribute undefined otherwise
        self.SCHEDULE_TIME = schedule_time or self.__SCHEDULE_TIME

    def set_schedule_time(self, schedule_time: dict):
        self.SCHEDULE_TIME = schedule_time or self.__SCHEDULE_TIME

    def get_dict(self):
        return self.parse_dict

    def get_list(self):
        return self.parse_list

    def load(self, content):
        """Load and parse raw schedule data.

        Args:
            content (dict): raw schedule data
        """
        self.raw = content
        self._parse()

    def _get_color(self, index):
        t = ['green', 'blue', 'purple', 'red', 'yellow']
        return t[index]

    def get_color(self, compare_target, compare_list):
        # reuse the colour of an already-parsed item of the same course
        for item in compare_list:
            if compare_target.get('kcmc') == item.get("course") and compare_target.get('kcmc') is not None:
                return item.get('color')
        # otherwise derive a stable pseudo-random colour from day and period
        time = compare_target.get('jcs').split('-')
        return self._get_color((int(compare_target.get('xqj')) * 3 + int(time[0]) + 1) % 5)

    def _parse(self):
        """Parse the schedule: name, class, course, time, place, campus,
        periods, weeks and other details."""
        self.parse_list: list = []
        self.parse_dict: dict = {}
        self.parse_ics = None
        user_message: dict = self.raw.get("xsxx")
        schedule_list: list = self.raw.get("kbList")
        # basic user information
        user_class_name = user_message.get("BJMC")
        username = user_message.get("XM")
        # schedule items
        for course in schedule_list:
            weeks_arr = self.get_course_week(course.get('zcd'))
            time_text = f"{course.get('xqjmc')} {course.get('jc')}"
            time = self.get_class_time(course.get('jcs'))
            color = self.get_color(course, self.parse_list)
            self.parse_list.append({
                "course": course.get('kcmc', "找不到课程名"),
                "place": course.get('cdmc', "找不到上课地点"),
                "campus": course.get('xqmc', "南城"),
                "teacher": course.get('xm'),
                "weeks_text": course.get('zcd'),
                "week_day": course.get('xqj'),
                "week_day_text": course.get('xqjmc'),
                "time_text": time_text,
                "weeks_arr": weeks_arr,
                "time": time,
                "color": color,
                "section": course.get('jcs')
            })
        self.parse_dict.setdefault("class_name", user_class_name)
        self.parse_dict.setdefault("username", username)
        self.parse_dict.setdefault("course_list", self.parse_list)

    def get_class_time(self, b2e: str):
        """Return the start times of the first and last period of a course.

        For a course in periods 1-2, "start" is the start time of period 1
        and "last" is the start time of period 2; both are *start* times.
        E.g. periods 1-2 run 8:30-9:15 and 9:20-10:05 with a five-minute
        break, which gives {'start': [8, 30], 'last': [9, 20]}.

        Args:
            b2e (str): raw period range, e.g. "1-2"

        Returns:
            dict: start times of the first and last period
        """
        start, end = b2e.split('-')
        start_time = self.SCHEDULE_TIME[start]
        end_time = self.SCHEDULE_TIME[end]
        return {"start": start_time, "last": end_time}

    def get_course_week(self, week_text: str) -> list:
        """Expand the week description into the full list of teaching weeks.

        Args:
            week_text (str): raw value of the zcd field

        Returns:
            list: week numbers in which the course is taught
        """
        interval = week_text.split(",")
        weeks = []
        for week in interval:
            leap = 1
            if "(单)" in week or "(双)" in week:   # odd/even weeks only
                week = week.replace("(双)", "")
                week = week.replace("(单)", "")
                leap = 2
            re_result = re.search(r"(\d+).?(\d*).*", week)   # raw string; was an invalid escape
            real = re_result.groups()
            if real[-1] == '':
                weeks += [int(real[0])]
            else:
                # a start-to-end week range
                weeks += [i for i in range(int(real[0]), int(real[1]) + 1, leap)]
        return weeks
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/client/api/schedule_parse.py
schedule_parse.py
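A minimal sketch of the week-string expansion done by `ScheduleParse.get_course_week()` above (the input string is a made-up example): ranges, single weeks, and the odd/even markers 单/双 all collapse into one flat week list.

```{python}
from school_sdk.client.api.schedule_parse import ScheduleParse

parser = ScheduleParse()
print(parser.get_course_week('1-8周(单),10周,12-16周'))
# -> [1, 3, 5, 7, 10, 12, 13, 14, 15, 16]
print(parser.get_class_time('1-2'))
# -> {'start': [8, 30], 'last': [9, 20]}
```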
from school_sdk.client.api.schedule_parse import ScheduleParse
from school_sdk.client.api import BaseCrawler
from school_sdk.client.exceptions import LoginException
from school_sdk.client.utils import user_is_login


class Schedule(BaseCrawler):
    year = None
    term = None

    def __init__(self, user_client, schedule_time: dict = None) -> None:
        """Schedule API wrapper.

        Args:
            user_client (UserClient): a logged-in user instance
        """
        super().__init__(user_client=user_client)
        self.raw_schedule = None
        self.schedule = None
        self.schedule_parse: ScheduleParse = ScheduleParse(schedule_time=schedule_time)

    def refresh_schedule(self):
        """Refresh the schedule data."""
        self.raw_schedule = None
        self.schedule = None
        self.load_schedule()

    def get_schedule_dict(self, **kwargs) -> dict:
        """Return the parsed schedule.

        Returns:
            dict: parsed schedule data
        """
        if not self.is_load_schedule():
            self.load_schedule(**kwargs)
        if kwargs.get("year") != self.year or kwargs.get("term") != self.term:
            self.load_schedule(**kwargs)
        return self.schedule_parse.get_dict()

    def get_schedule_list(self, **kwargs):
        """Return only the parsed course list.

        Returns:
            list: course list
        """
        if not self.is_load_schedule():
            self.load_schedule()
        return self.schedule_parse.get_list()

    def get_raw_schedule(self, **kwargs):
        """Return the raw schedule data.

        Returns:
            dict: raw schedule JSON
        """
        if self.raw_schedule is None:
            self.load_schedule()
        return self.raw_schedule

    def parse_ics(self):
        """Convert to the ics calendar format (not implemented)."""
        pass

    def is_load_schedule(self):
        return self.raw_schedule is not None

    def load_schedule(self, **kwargs):
        """Load the schedule."""
        self.raw_schedule = self._get_student_schedule(**kwargs)
        self.schedule_parse.load(self.raw_schedule)

    def _get_student_schedule(self, year, term, **kwargs):
        self.year = year
        self.term = term
        params = {
            "gnmkdm": "N2151",
            "su": self.account
        }
        data = {
            "xnm": year,
            "xqm": self.TERM.get(term, 1),
            "kzlx": "ck"
        }
        url = self.school.config['url_endpoints']['SCHEDULE']['API']
        res = self.post(url=url, params=params, data=data, **kwargs)
        if user_is_login(self.account, res.text):
            return res.json()
        raise LoginException(400, "session expired while fetching the schedule")
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/client/api/schedules.py
schedules.py
# cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209
from school_sdk.client.api import BaseCrawler


class Score(BaseCrawler):
    year = None
    term = None

    def __init__(self, user_client) -> None:
        super().__init__(user_client)
        self.endpoints: dict = self.school.config['url_endpoints']
        self.raw_score = None
        self.score_dict: dict = {}
        self.score_list: list = []

    def get_score(self, **kwargs):
        return self.get_score_dict(**kwargs)

    def get_score_list(self, **kwargs):
        """Return the scores as a list.

        Returns:
            list: score list
        """
        if not self.score_list:
            self.parse(**kwargs)
        return self.score_list

    def get_score_dict(self, **kwargs):
        """Return the scores as a dict keyed by course name.

        Returns:
            dict: score dict
        """
        if not self.score_dict:
            self.parse(**kwargs)
        if kwargs.get('year') != self.year or kwargs.get('term') != self.term:
            self.raw_score = None
            self.parse(**kwargs)
        return self.score_dict

    def parse(self, **kwargs):
        """Parse the data."""
        if self.raw_score is None:
            self.load_score(**kwargs)
        self._parse(self.raw_score)

    def load_score(self, **kwargs) -> None:
        """Load the scores."""
        self.raw_score = self._get_score(**kwargs)

    def _get_score(self, year: int, term: int = 1, **kwargs):
        """Fetch the scores from the educational administration system.

        Args:
            year (int): school year
            term (int, optional): term. Defaults to 1.

        Returns:
            dict: JSON data
        """
        self.year = year
        self.term = term
        url = self.endpoints['SCORE']['API']
        params = {
            'doType': 'query',
            'gnmkdm': 'N305005',
            'su': self.account
        }
        data = {
            'xnm': year,
            'xqm': self.TERM.get(term, 3),
            '_search': False,
            'nd': self.t,
            'queryModel.showCount': 500,
            'queryModel.currentPage': 1,
            'queryModel.sortName': None,
            'queryModel.sortOrder': 'asc',
            'time': 4,
        }
        res = self.post(url=url, params=params, data=data, **kwargs)
        return res.json()

    def _parse(self, raw: dict):
        """Parse the raw score payload from the system.

        Args:
            raw (dict): raw data
        """
        self.score_dict: dict = {}
        self.score_list: list = []
        items = raw.get('items')
        for item in items:
            format_item = {
                "course_name": item.get('kcmc'),        # kcmc -> course name
                "course_code": item.get('kch'),         # kch -> course code
                'course_nature': item.get('kcxzmc'),    # kcxzmc -> course nature
                'course_target': item.get('kcbj'),      # kcbj -> course mark
                'teacher': item.get('jsxm'),            # jsxm -> teacher name
                'submitted_at': item.get('tjsj'),       # tjsj -> submission time
                'exam_method': item.get('khfsmc'),      # khfsmc -> assessment method
                'exam_nature': item.get('ksxz'),        # ksxz -> exam nature
                'exam_result': item.get('cj'),          # cj -> score
                'exam_score': item.get('bfzcj'),        # bfzcj -> percentage score
                'credit': item.get('xf'),               # xf -> credit
                'course_group': item.get('kkbmmc'),     # kkbmmc -> offering department
                'grade': item.get('njdm_id'),           # njdm_id -> grade code
                'grade_point': item.get('jd'),          # jd -> grade point
                'reason': item.get('bzxx')              # bzxx -> remarks
            }
            self.score_list.append(format_item)
            self.score_dict.setdefault(item.get('kcmc'), format_item)
zf-school-sdk
/zf-school-sdk-1.5.0.tar.gz/zf-school-sdk-1.5.0/school_sdk/client/api/score.py
score.py
import binascii
import base64
from urllib import parse

import requests
import rsa
from bs4 import BeautifulSoup
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5


class Login(object):
    def __init__(self, base_url):
        self.base_url = base_url
        self.key_url = parse.urljoin(base_url, 'xtgl/login_getPublicKey.html')
        self.login_url = parse.urljoin(base_url, 'xtgl/login_slogin.html')
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Referer': self.login_url
        }
        self.sess = requests.Session()
        self.cookies = ''
        self.cookies_str = ''

    def login(self, sid, password):
        """Log in."""
        req = self.sess.get(self.login_url, headers=self.headers)
        soup = BeautifulSoup(req.text, 'lxml')
        tokens = soup.find(id='csrftoken').get("value")
        res = self.sess.get(self.key_url, headers=self.headers).json()
        n = res['modulus']
        e = res['exponent']
        hmm = self.get_rsa(password, n, e)
        login_data = {'csrftoken': tokens, 'yhm': sid, 'mm': hmm}   # 'mm' was duplicated in the dict literal
        self.sess.post(self.login_url, headers=self.headers, data=login_data)
        self.cookies = self.sess.cookies
        self.cookies_str = '; '.join([item.name + '=' + item.value for item in self.cookies])

    @classmethod
    def encrypt_sqf(cls, pkey, str_in):
        """Encrypt str_in with a base64-encoded RSA public key."""
        key_bytes = base64.b64decode(pkey)
        pubkey = RSA.importKey(key_bytes)          # this is a public key, not a private one
        cipher = PKCS1_v1_5.new(pubkey)
        signature = base64.b64encode(cipher.encrypt(str_in.encode("utf-8")))
        return signature

    @classmethod
    def get_rsa(cls, pwd, n, e):
        """RSA-encrypt the password and base64-encode the result."""
        message = str(pwd).encode()
        rsa_n = binascii.b2a_hex(binascii.a2b_base64(n))
        rsa_e = binascii.b2a_hex(binascii.a2b_base64(e))
        key = rsa.PublicKey(int(rsa_n, 16), int(rsa_e, 16))
        encrypted_pwd = rsa.encrypt(message, key)
        result = binascii.b2a_base64(encrypted_pwd)
        return result
zfapi
/api/login.py
login.py
from bs4 import BeautifulSoup
import re
import time
import requests
from urllib import parse


class GetInfo(object):
    def __init__(self, base_url, cookies):
        self.base_url = base_url
        self.headers = {
            'Referer': base_url,
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
        }
        self.cookies = cookies

    def get_information(self):
        """Fetch personal information."""
        url = parse.urljoin(self.base_url, 'xsxxxggl/xsxxwh_cxCkDgxsxx.html?gnmkdm=N100801')
        res = requests.get(url, headers=self.headers, cookies=self.cookies)
        jres = res.json()
        res_dict = {
            'name': jres['xm'],
            'studentId': jres['xh'],
            'sex': jres['xbm'],
            'birthplace': jres['jg'],
            'email': jres['dzyx'],
            'telphone': jres['gddh'],
            'brithday': jres['csrq'],
            'idNumber': jres['zjhm'],
            'candidateNumber': jres['ksh'],
            'bankcard': jres['yhkh'],
            'status': jres['xjztdm'],
            'collegeName': jres['zsjg_id'],
            'majorName': jres['zszyh_id'],
            'className': jres['bh_id'],
            'entryDate': jres['rxrq'],
            'graduationSchool': jres['byzx'],
            'domicile': jres['jtdz'],
            'politicalStatus': jres['zzmmm'],
            'national': jres['mzm'],
            'education': jres['pyccdm'],
            'postalCode': jres['yzbm']
        }
        return res_dict

    def get_information_raw(self):
        """Fetch raw personal information as returned by the system."""
        url = parse.urljoin(self.base_url, 'xsxxxggl/xsxxwh_cxCkDgxsxx.html?gnmkdm=N100801')
        res = requests.get(url, headers=self.headers, cookies=self.cookies)
        return res.json()

    def get_notice(self):
        """Fetch notices."""
        url_0 = parse.urljoin(self.base_url, 'xtgl/index_cxNews.html?localeKey=zh_CN&gnmkdm=index')
        url_1 = parse.urljoin(self.base_url, 'xtgl/index_cxAreaTwo.html?localeKey=zh_CN&gnmkdm=index')
        res_list = []
        url_list = []
        res_0 = requests.get(url_0, headers=self.headers, cookies=self.cookies)
        res_1 = requests.get(url_1, headers=self.headers, cookies=self.cookies)
        soup_0 = BeautifulSoup(res_0.text, 'lxml')
        soup_1 = BeautifulSoup(res_1.text, 'lxml')
        url_list += [i['href'] for i in soup_0.select('a[href*="/xtgl/"]')]
        url_list += [i['href'] for i in soup_1.select('a[href*="/xtgl/"]')]
        for u in url_list:
            _res = requests.get(parse.urljoin(self.base_url, u), headers=self.headers, cookies=self.cookies)
            _soup = BeautifulSoup(_res.text, 'lxml')
            title = _soup.find(attrs={'class': 'text-center'}).string
            info = [i.string for i in _soup.select_one('[class="text-center news_title1"]').find_all('span')]
            publisher = re.search(r':(.*)', info[0]).group(1)
            ctime = re.search(r':(.*)', info[1]).group(1)
            vnum = re.search(r':(.*)', info[2]).group(1)
            detailed = _soup.find(attrs={'class': 'news_con'})
            content = ''.join(list(detailed.strings))
            doc_urls = [self.base_url + i['href'][2:] for i in detailed.select('a[href^=".."]')]
            res_list.append({
                'title': title,
                'publisher': publisher,
                'ctime': ctime,
                'vnum': vnum,
                'content': content,
                'doc_urls': doc_urls
            })
        return res_list

    def get_message(self):
        """Fetch messages."""
        url = parse.urljoin(self.base_url, 'xtgl/index_cxDbsy.html?doType=query')
        data = {
            'sfyy': '0',                      # read flag: 1 = unread, 2 = read
            'flag': '1',
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '1000',   # maximum number of rows
            'queryModel.currentPage': '1',    # current page
            'queryModel.sortName': 'cjsj',
            'queryModel.sortOrder': 'desc',   # newest first; 'asc' for oldest first
            'time': '0'
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        jres = res.json()
        res_list = [{'message': i['xxnr'], 'ctime': i['cjsj']} for i in jres['items']]
        return res_list

    def get_message_raw(self):
        """Fetch the raw message JSON as returned by the system."""
        url = parse.urljoin(self.base_url, 'xtgl/index_cxDbsy.html?doType=query')
        data = {
            'sfyy': '0',                      # read flag: 1 = unread, 2 = read
            'flag': '1',
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '1000',   # maximum number of rows
            'queryModel.currentPage': '1',    # current page
            'queryModel.sortName': 'cjsj',
            'queryModel.sortOrder': 'desc',   # newest first; 'asc' for oldest first
            'time': '0'
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        return res.json()

    def get_grade(self, year, term):
        """Fetch grades."""
        url = parse.urljoin(self.base_url, 'cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        elif term == '0':
            term = ''
        else:
            print('Please enter the correct term value!!! ("0" or "1" or "2")')
            return {}
        data = {
            'xnm': year,                     # school year
            'xqm': term,                     # term code: first term = 3, second = 12, whole year = ''
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '100',   # maximum rows per page
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'                      # query count
        }
        res = requests.post(url=url, headers=self.headers, data=data, cookies=self.cookies)
        jres = res.json()
        if jres.get('items'):   # guard against an empty items payload
            res_dict = {
                'name': jres['items'][0]['xm'],
                'studentId': jres['items'][0]['xh'],
                'schoolYear': jres['items'][0]['xnm'],
                'schoolTerm': jres['items'][0]['xqmmc'],
                'course': [{
                    'courseTitle': i['kcmc'],
                    'teacher': i['jsxm'],
                    'courseId': i['kch_id'],
                    'className': i['jxbmc'],
                    'courseNature': i.get('kcxzmc') or '',
                    'credit': i['xf'],
                    'grade': i['cj'],
                    'gradePoint': i.get('jd') or '',
                    'gradeNature': i['ksxz'],
                    'startCollege': i['kkbmmc'],
                    'courseMark': i['kcbj'],
                    'courseCategory': i['kclbmc'],
                    'courseAttribution': i.get('kcgsmc') or ''
                } for i in jres['items']]}
            return res_dict
        else:
            return {}

    def get_grade_raw(self, year, term):
        """Fetch the raw grade JSON as returned by the system."""
        url = parse.urljoin(self.base_url, 'cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        elif term == '0':
            term = ''
        else:
            print('Please enter the correct term value!!! ("0" or "1" or "2")')
            return {}
        data = {
            'xnm': year,                     # school year
            'xqm': term,                     # term code: first term = 3, second = 12, whole year = ''
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '100',   # maximum rows per page
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'                      # query count
        }
        res = requests.post(url=url, headers=self.headers, data=data, cookies=self.cookies)
        return res.json()

    def get_schedule(self, year, term):
        """Fetch the class schedule."""
        url = parse.urljoin(self.base_url, 'kbcx/xskbcx_cxXsKb.html?gnmkdm=N2151')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        else:
            print('Please enter the correct term value!!! ("1" or "2")')
            return {}
        data = {
            'xnm': year,
            'xqm': term
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        jres = res.json()
        res_dict = {
            'name': jres['xsxx']['XM'],
            'studentId': jres['xsxx']['XH'],
            'schoolYear': jres['xsxx']['XNM'],
            'schoolTerm': jres['xsxx']['XQMMC'],
            'normalCourse': [{
                'courseTitle': i['kcmc'],
                'teacher': i['xm'],
                'courseId': i['kch_id'],
                'courseSection': i['jc'],
                'courseWeek': i['zcd'],
                'campus': i['xqmc'],
                'courseRoom': i['cdmc'],
                'className': i['jxbmc'],
                'hoursComposition': i['kcxszc'],
                'weeklyHours': i['zhxs'],
                'totalHours': i['zxs'],
                'credit': i['xf'],
                'classtype': i['kclb'],
                'examtype': i['khfsmc']
            } for i in jres['kbList']],
            'otherCourses': [i['qtkcgs'] for i in jres['sjkList']]}
        return res_dict

    def get_schedule_raw(self, year, term):
        """Fetch the raw schedule JSON as returned by the system."""
        url = parse.urljoin(self.base_url, 'kbcx/xskbcx_cxXsKb.html?gnmkdm=N2151')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        else:
            print('Please enter the correct term value!!! ("1" or "2")')
            return {}
        data = {
            'xnm': year,
            'xqm': term
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        return res.json()

    def get_exam(self, year, term):
        """Fetch exam information."""
        url = parse.urljoin(self.base_url, 'kwgl/kscx_cxXsksxxIndex.html?doType=query&gnmkdm=N358105')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        else:
            print('Please enter the correct term value!!! ("1" or "2")')
            return {}
        data = {
            'xnm': year,                     # school year
            'xqm': term,                     # term code: first term = 3, second = 12
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '100',   # maximum rows per page
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'                      # query count
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        jres = res.json()
        if jres.get('items'):   # guard against an empty items payload
            res_dict = {
                'name': jres['items'][0]['xm'],
                'studentId': jres['items'][0]['xh'],
                'schoolYear': jres['items'][0]['xnmc'][:4],
                'schoolTerm': jres['items'][0]['xqmmc'],
                'exams': [{
                    'courseTitle': i['kcmc'],
                    'teacher': i['jsxx'],
                    'courseId': i['kch'],
                    'reworkMark': i['cxbj'],
                    'selfeditingMark': i['zxbj'],
                    'examName': i['ksmc'],
                    'paperId': i['sjbh'],
                    'examTime': i['kssj'],
                    'eaxmLocation': i['cdmc'],
                    'campus': i['xqmc']
                } for i in jres['items']]}
            return res_dict
        else:
            return {}

    def get_exam_raw(self, year, term):
        """Fetch the raw exam JSON as returned by the system."""
        url = parse.urljoin(self.base_url, 'kwgl/kscx_cxXsksxxIndex.html?doType=query&gnmkdm=N358105')
        # map the term to the system's internal code
        if term == '1':
            term = '3'
        elif term == '2':
            term = '12'
        else:
            print('Please enter the correct term value!!! ("1" or "2")')
            return {}
        data = {
            'xnm': year,                     # school year
            'xqm': term,                     # term code: first term = 3, second = 12
            '_search': 'false',
            'nd': int(time.time() * 1000),
            'queryModel.showCount': '100',   # maximum rows per page
            'queryModel.currentPage': '1',
            'queryModel.sortName': '',
            'queryModel.sortOrder': 'asc',
            'time': '0'                      # query count
        }
        res = requests.post(url, headers=self.headers, data=data, cookies=self.cookies)
        return res.json()
zfapi
/api/get_info.py
get_info.py
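A minimal flow sketch for the zfapi package above (hypothetical base URL and credentials; the `api.*` import paths assume the repository root is on `sys.path`): log in with `Login`, then reuse its session cookies for `GetInfo` queries.

```{python}
from api.login import Login
from api.get_info import GetInfo

base_url = 'http://jwglxt.example.edu/jwglxt/'
lgn = Login(base_url=base_url)
lgn.login('2018133209', 'password')
info = GetInfo(base_url=base_url, cookies=lgn.cookies)
print(info.get_information())
print(info.get_grade('2020', '1'))
```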
![ZFC](doc/zfc_card.png)

# ZFC #

ZFC is a software tool for calculating the fold-change z-score of screening data. ZFC can be used for CRISPR library screening with or without [iBAR][1], with or without replicates. Please cite us [(Xu et al., 2021)][2]:

* Xu, P., Liu, Z., Liu, Y., Ma, H., Xu, Y., Bao, Y., Zhu, S., Cao, Z., Wu, Z., Zhou, Z., et al. (**2021**). Genome-wide interrogation of gene functions through base editor screens empowered by barcoded sgRNAs. *Nat Biotechnol*.

## Dependency ##

ZFC is designed with python3 and requires packages that are available in Linux, Mac OS, and Windows.

* Python3.x
* numpy >= 1.10
* scipy >= 1.0
* pandas >= 0.16
* matplotlib >= 2.0
* sklearn >= 0.20

## Installation ##

Clone this repository, then install the software.

```{shell}
$ git clone https://github.com/wolfsonliu/zfc.git
$ cd zfc
$ python3 setup.py install
```

Or from pypi:

```{shell}
$ pip install --user zfc
```

It's advised to use [virtualenv](https://virtualenv.pypa.io/en/stable/) or other software to create a virtual environment for the ZFC software.

## Usage ##

The help of ZFC software:

```{shell}
usage: zfc [-h] [-i INPUT] [-o OUTPREFIX] [--punish-rate PUNISH_RATE]
           [--n-sd N_SD] [--null-iteration NULL_ITERATION] [--plot]
           [--version]

Calculate fold change of screening data (zscore log2 fold change).

optional arguments:
  -h, --help            show this help message and exit
  -i INPUT, --input INPUT
                        Raw count table with header should include: <gene>,
                        <guide>, <barcode>, <ctrl>, <exp>. <ctrl> is the raw
                        counts of control group, <exp> is the raw counts of
                        treatment group. For screening without barcode, the
                        barcode column can be the same with guide.
  -o OUTPREFIX, --outprefix OUTPREFIX
                        Output file prefix, can be the file directory path
                        with file name prefix. The directory in the outprefix
                        should be built before analysis.
  --normalization {total,median,quantile,median_ratio,none}
                        Normalization of raw count data, default is total.
                        Support method: total (Total sum normalization);
                        median (Median normalization); quantile (Upper
                        quantile normalization (0.75)); median_ratio (Median
                        ratio normalization); none (No normalization).
  --top-n-sgrna TOP_N_SGRNA
                        Only consider top n barcodes for each sgRNA. Default
                        to use all the data.
  --top-n-gene TOP_N_GENE
                        Only consider top n barcodes for each gene. Default
                        to use all the data.
  --null-iteration NULL_ITERATION
                        The iteration to generate null distribution in
                        calculating the p value of genes. The larger the
                        better, but slower in calculation, default to be 100.
  --plot                Output figures.
  --version             show program's version number and exit
```

## Example ##

* For screening data with replicates but without iBAR, you should first format the replicates as barcode data and then use zfc to calculate.
```{shell}
gene    guide   barcode ctrl    exp
A1BG    AAGAGCGCCTCGGTCCCAGC    R_A     213     0
A1BG    AAGAGCGCCTCGGTCCCAGC    R_B     213     1.03341
A1BG    AAGAGCGCCTCGGTCCCAGC    R_C     213     49.2844
A1BG    CAAGAGAAAGACCACGAGCA    R_A     647     679.474
A1BG    CAAGAGAAAGACCACGAGCA    R_B     647     295.554
A1BG    CAAGAGAAAGACCACGAGCA    R_C     647     472.941
A1BG    CACCTTCGAGCTGCTGCGCG    R_A     469     190.335
A1BG    CACCTTCGAGCTGCTGCGCG    R_B     469     62.0044
A1BG    CACCTTCGAGCTGCTGCGCG    R_C     469     280.542
A1BG    CACTGGCGCCATCGAGAGCC    R_A     678     188.288
A1BG    CACTGGCGCCATCGAGAGCC    R_B     678     165.345
A1BG    CACTGGCGCCATCGAGAGCC    R_C     678     202.824
A1BG    GCTCGGGCTTGTCCACAGGA    R_A     559     333.597
A1BG    GCTCGGGCTTGTCCACAGGA    R_B     559     103.341
A1BG    GCTCGGGCTTGTCCACAGGA    R_C     559     409.44
A1BG    TGGACTTCCAGCTACGGCGC    R_A     363     176.008
A1BG    TGGACTTCCAGCTACGGCGC    R_B     363     307.955
A1BG    TGGACTTCCAGCTACGGCGC    R_C     363     254.952
```

* For screening data with iBAR but without replicates, you can use the software directly.

```{shell}
gene    guide   barcode ctrl    exp
A1BG    ACTTCCAGCTGTTCAAGAAT    CTCGCT  597.0   659.0
A1BG    ACTTCCAGCTGTTCAAGAAT    GATGGT  1038.0  1233.0
A1BG    ACTTCCAGCTGTTCAAGAAT    GCACTG  884.0   855.0
A1BG    ACTTCCAGCTGTTCAAGAAT    TCCACT  807.0   870.0
A1BG    CGAGAGCCAGGGAGCAGGCA    CTCGCT  777.0   948.0
A1BG    CGAGAGCCAGGGAGCAGGCA    GATGGT  1448.0  1385.0
A1BG    CGAGAGCCAGGGAGCAGGCA    GCACTG  1225.0  1205.0
A1BG    CGAGAGCCAGGGAGCAGGCA    TCCACT  1030.0  1196.0
A1BG    GACTTCCAGCTGTTCAAGAA    CTCGCT  448.0   252.0
A1BG    GACTTCCAGCTGTTCAAGAA    GATGGT  685.0   700.0
A1BG    GACTTCCAGCTGTTCAAGAA    GCACTG  487.0   513.0
A1BG    GACTTCCAGCTGTTCAAGAA    TCCACT  383.0   409.0
```

* For screening data with iBAR and replicates, you can analyze the replicates separately, or treat the replicates as iBARs for analysis, as in the example below.

```{shell}
gene    guide   barcode ctrl    exp
A1BG    ACTTCCAGCTGTTCAAGAAT    CTCGCT-R_1      402.548 399.666
A1BG    ACTTCCAGCTGTTCAAGAAT    CTCGCT-R_2      399.486 435.624
A1BG    ACTTCCAGCTGTTCAAGAAT    GATGGT-R_1      699.908 738.835
A1BG    ACTTCCAGCTGTTCAAGAAT    GATGGT-R_2      675.699 703.222
A1BG    ACTTCCAGCTGTTCAAGAAT    GCACTG-R_1      596.068 785.816
A1BG    ACTTCCAGCTGTTCAAGAAT    GCACTG-R_2      675.039 655.51
A1BG    ACTTCCAGCTGTTCAAGAAT    TCCACT-R_1      544.148 565.71
A1BG    ACTTCCAGCTGTTCAAGAAT    TCCACT-R_2      588.023 558.014
A1BG    CGAGAGCCAGGGAGCAGGCA    CTCGCT-R_1      523.92  643.584
A1BG    CGAGAGCCAGGGAGCAGGCA    CTCGCT-R_2      605.821 514.451
A1BG    CGAGAGCCAGGGAGCAGGCA    GATGGT-R_1      976.365 1022.66
A1BG    CGAGAGCCAGGGAGCAGGCA    GATGGT-R_2      922.905 1006.78
A1BG    CGAGAGCCAGGGAGCAGGCA    GCACTG-R_1      826     778.093
A1BG    CGAGAGCCAGGGAGCAGGCA    GCACTG-R_2      883.352 845.664
A1BG    CGAGAGCCAGGGAGCAGGCA    TCCACT-R_1      694.514 738.835
A1BG    CGAGAGCCAGGGAGCAGGCA    TCCACT-R_2      777.218 842.898
```

* For screening data without iBAR and replicates, you can assign the guide column as the barcode column for analysis, which means each guide RNA has only one iBAR.

```{shell}
gene    guide   barcode ctrl    exp
A1BG    CACCTTCGAGCTGCTGCGCG    CACCTTCGAGCTGCTGCGCG    469     125
A1BG    AAGAGCGCCTCGGTCCCAGC    AAGAGCGCCTCGGTCCCAGC    213     0
A1BG    TGGACTTCCAGCTACGGCGC    TGGACTTCCAGCTACGGCGC    363     119
A1BG    CACTGGCGCCATCGAGAGCC    CACTGGCGCCATCGAGAGCC    678     122
A1BG    GCTCGGGCTTGTCCACAGGA    GCTCGGGCTTGTCCACAGGA    559     212
A1BG    CAAGAGAAAGACCACGAGCA    CAAGAGAAAGACCACGAGCA    647     464
A1CF    CGTGGCTATTTGGCATACAC    CGTGGCTATTTGGCATACAC    898     322
A1CF    GGTATACTCTCCTTGCAGCA    GGTATACTCTCCTTGCAGCA    199     94
A1CF    GACATGGTATTGCAGTAGAC    GACATGGTATTGCAGTAGAC    271     118
A1CF    GAGTCATCGAGCAGCTGCCA    GAGTCATCGAGCAGCTGCCA    158     33
A1CF    GGTGCAGCATCCCAACCAGG    GGTGCAGCATCCCAACCAGG    195     25
A1CF    CCAAGCTATATCCTGTGCGC    CCAAGCTATATCCTGTGCGC    353     367
```

## Algorithm ##

The ZFC analysis algorithm adopts the z-score of the log2 fold change as the measure of sgRNA and gene change between the reference group (without treatment) and the experiment group (with treatment).
ZFC supports screening with [iBAR][1] employed, as well as conventional screening with replicates. sgRNAs with replicates and sgRNA-iBARs are treated with a similar procedure.

### Step 1: Normalization of raw counts ###

We use total counts to normalize the raw counts, which corrects for differences in sequencing depth between batches. Some sgRNAs in the reference group have very low raw counts, which can distort the fold-change calculation in the following analysis. We therefore define sgRNAs whose counts are below the 0.05 quantile in both the reference group and the experiment group as small-count sgRNAs. The mean count of all the small-count sgRNAs is added to the normalized counts. The normalized count is calculated by the following expression:

```{latex}
$$Cn_{i} = \frac{Cr_{i}}{S_{ref}} \times 0.5 \times (S_{ref} + S_{exp})$$

$$C_{i} = Cn_{i} + Cm_{small}$$
```

where `$Cn_{i}$` is the normalized count of the ith sgRNA-iBAR in the reference group, `$Cr_{i}$` is the raw count of the ith sgRNA-iBAR, `$Cm_{small}$` is the mean count of all the small-count sgRNAs, `$S_{ref}$` is the sum of raw counts of all sgRNA-iBARs in the reference group, `$S_{exp}$` is the sum of raw counts of all sgRNA-iBARs in the experiment group, and `$C_{i}$` is the final normalized count of the ith sgRNA-iBAR after the small-count adjustment. The normalized counts for sgRNAs in the experiment group are calculated in the same way.

### Step 2: Calculate fold change ###

The raw fold change of each sgRNA-iBAR is calculated from the normalized counts of the reference and experiment groups.

```{latex}
$$fc_{i} = \frac{Cref_{i}}{Cexp_{i}}$$

$$lfc_{i} = \log_{2}fc_{i}$$
```

where `$fc_{i}$` is the fold change (FC) and `$lfc_{i}$` the log2 fold change (LFC) of the ith sgRNA-iBAR, and `$Cref_{i}$` and `$Cexp_{i}$` are the normalized counts of the reference and experiment groups respectively.

### Step 3: Calculate fold change std ###

In order to calculate the z-score of the LFC (ZLFC), the standard deviation of the LFC must be estimated. The LFC of an sgRNA-iBAR depends on its normalized reference-group count, so the LFC standard deviation differs between sgRNA-iBARs with different reference counts. All sgRNA-iBARs are therefore divided into several sets according to their normalized reference counts, and the standard deviation of the LFC is calculated within each set. Because the LFC standard deviation is linearly related to the normalized reference count, a linear model is fitted between the two, and this model is then used to assign an LFC standard deviation to every sgRNA-iBAR.

### Step 4: Raw z score of log fold change ###

The raw z-score of the log fold change is calculated as:

```{latex}
$$raw\ ZLFC = \frac{LFC}{LFC\ std}$$
```

### Step 5: Calculate sgRNA mean z score of fold change ###

The sgRNA-level ZLFC is calculated as the mean of the ZLFCs of all the relevant sgRNA-iBARs:

```{latex}
$$ZLFC_{sgRNA} = \frac{\sum{ZLFC_{sgRNA-iBAR}}}{n}$$
```

where the sgRNA has n sgRNA-iBARs. An empirical p value is also calculated for the sgRNA ZLFCs and adjusted to control the False Discovery Rate.

### Step 6: Calculate gene mean z score of fold change ###

The gene-level ZLFC is calculated as the mean of the ZLFCs of all the relevant sgRNAs:

```{latex}
$$ZLFC_{gene} = \frac{\sum{ZLFC_{sgRNA}}}{m}$$
```

where the gene has m sgRNAs. An empirical p value is also calculated for the gene ZLFCs and adjusted to control the False Discovery Rate.
### Step 7: Robust rank aggregation analysis ###

[Robust rank aggregation][3] is utilized to calculate the rank significance of each gene from its sgRNA-iBARs within the whole library. Aside from robust rank aggregation, mean rank aggregation is also calculated. The robust rank score is adjusted to control the False Discovery Rate.

***

[1]: <https://genomebiology.biomedcentral.com/articles/10.1186/s13059-019-1628-0> "Zhu, S. et al. Guide RNAs with embedded barcodes boost CRISPR-pooled screens. Genome Biology 20, (2019)."

[2]: <http://www.nature.com/articles/s41587-021-00944-1> "Xu, P., Liu, Z., Liu, Y., Ma, H., Xu, Y., Bao, Y., Zhu, S., Cao, Z., Wu, Z., Zhou, Z., et al. (2021). Genome-wide interrogation of gene functions through base editor screens empowered by barcoded sgRNAs. Nat Biotechnol."

[3]: <http://bioinformatics.oxfordjournals.org/content/28/4/573.abstract> "Kolde, R., Laur, S., Adler, P. & Vilo, J. Robust rank aggregation for gene list integration and meta-analysis. Bioinformatics 28, 573-580 (2012)."
zfc
/zfc-0.1.7.tar.gz/zfc-0.1.7/README.md
README.md
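A condensed sketch (not the zfc package itself) of Steps 1-4 of the README's algorithm: total-count normalization with small-count adjustment, log2 fold change, a binned LFC-sd linear fit, and the raw ZLFC. The README leaves the small-count adjustment slightly ambiguous; here the mean of the raw small counts is assumed, and at least a few small-count rows and a populated table are assumed.

```{python}
import numpy as np
import pandas as pd

def raw_zlfc(df):
    """df columns: gene, guide, barcode, ctrl, exp (raw counts)."""
    half = 0.5 * (df.ctrl.sum() + df.exp.sum())
    ref = df.ctrl / df.ctrl.sum() * half               # Step 1: total-count normalization
    exp = df.exp / df.exp.sum() * half
    small = (df.ctrl < df.ctrl.quantile(0.05)) & (df.exp < df.exp.quantile(0.05))
    ref = ref + df.ctrl[small].mean()                  # small-count adjustment
    exp = exp + df.exp[small].mean()
    lfc = np.log2(ref / exp)                           # Step 2: LFC (ref/exp, as in the README)
    bins = pd.qcut(ref, 10, duplicates='drop')         # Step 3: bin by reference counts
    sd = lfc.groupby(bins).std()
    mid = ref.groupby(bins).mean()
    k, b = np.polyfit(mid, sd, 1)                      # linear model: LFC sd ~ ref count
    return lfc / (k * ref + b)                         # Step 4: raw ZLFC
```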
This work also comes with the added permission that you may combine it with a work licensed under the OpenSSL license (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the OpenSSL license. This work also comes with the added permission that you may combine it with a work licensed under the Eclipse Public Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Eclipse Public Licence. This work also comes with the added permission that you may combine it with a work licensed under the Q Public Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Q Public Licence. This work also comes with the added permission that you may combine it with a work licensed under the Apache Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Apache Licence. This work also comes with the added permission that you may combine it with a work licensed under the GNU Lesser General Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the GNU Lesser General Public License. This work also comes with the added permission that you may combine it with a work licensed under the Zope Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Zope Public License. This work also comes with the added permission that you may combine it with a work licensed under the Python Software Foundation License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Python Software Foundation License. This work also comes with the added permission that you may combine it with a work licensed under the Academic Free License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Academic Free License. This work also comes with the added permission that you may combine it with a work licensed under the Apple Public Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Apple Public Source License. 
This work also comes with the added permission that you may combine it with a work licensed under the BitTorrent Open Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the BitTorrent Open Source License. This work also comes with the added permission that you may combine it with a work licensed under the Lucent Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Lucent Public License. This work also comes with the added permission that you may combine it with a work licensed under the Jabber Open Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Jabber Open Source License. This work also comes with the added permission that you may combine it with a work licensed under the Common Development and Distribution License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Common Development and Distribution License. This work also comes with the added permission that you may combine it with a work licensed under the Microsoft Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Microsoft Public License. This work also comes with the added permission that you may combine it with a work licensed under the Microsoft Reciprocal License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Microsoft Reciprocal License. This work also comes with the added permission that you may combine it with a work licensed under the Sun Industry Standards Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Sun Industry Standards Source License. This work also comes with the added permission that you may combine it with a work licensed under the Open Software License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Open Software License. ======================================================= Transitive Grace Period Public Licence ("TGPPL") v. 1.0 ======================================================= This Transitive Grace Period Public Licence (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following licensing notice adjacent to the copyright notice for the Original Work: *Licensed under the Transitive Grace Period Public Licence version 1.0* 1. 
**Grant of Copyright License.** Licensor grants You a worldwide, royalty-free, non-exclusive, sublicensable license, for the duration of the copyright, to do the following: a. to reproduce the Original Work in copies, either alone or as part of a collective work; b. to translate, adapt, alter, transform, modify, or arrange the Original Work, thereby creating derivative works ("Derivative Works") based upon the Original Work; c. to distribute or communicate copies of the Original Work and Derivative Works to the public, with the proviso that copies of Original Work or Derivative Works that You distribute or communicate shall be licensed under this Transitive Grace Period Public Licence no later than 12 months after You distributed or communicated said copies; d. to perform the Original Work publicly; and e. to display the Original Work publicly. 2. **Grant of Patent License.** Licensor grants You a worldwide, royalty-free, non-exclusive, sublicensable license, under patent claims owned or controlled by the Licensor that are embodied in the Original Work as furnished by the Licensor, for the duration of the patents, to make, use, sell, offer for sale, have made, and import the Original Work and Derivative Works. 3. **Grant of Source Code License.** The term "Source Code" means the preferred form of the Original Work for making modifications to it and all available documentation describing how to modify the Original Work. Licensor agrees to provide a machine-readable copy of the Source Code of the Original Work along with each copy of the Original Work that Licensor distributes. Licensor reserves the right to satisfy this obligation by placing a machine-readable copy of the Source Code in an information repository reasonably calculated to permit inexpensive and convenient access by You for as long as Licensor continues to distribute the Original Work. 4. **Exclusions From License Grant.** Neither the names of Licensor, nor the names of any contributors to the Original Work, nor any of their trademarks or service marks, may be used to endorse or promote products derived from this Original Work without express prior permission of the Licensor. Except as expressly stated herein, nothing in this License grants any license to Licensor's trademarks, copyrights, patents, trade secrets or any other intellectual property. No patent license is granted to make, use, sell, offer for sale, have made, or import embodiments of any patent claims other than the licensed claims defined in Section 2. No license is granted to the trademarks of Licensor even if such marks are included in the Original Work. Nothing in this License shall be interpreted to prohibit Licensor from licensing under terms different from this License any Original Work that Licensor otherwise would have a right to license. 5. **External Deployment.** The term "External Deployment" means the use, distribution, or communication of the Original Work or Derivative Works in any way such that the Original Work or Derivative Works may be used by anyone other than You, whether those works are distributed or communicated to those persons or made available as an application intended for use over a network. As an express condition for the grants of license hereunder, You must treat any External Deployment by You of the Original Work or a Derivative Work as a distribution under section 1(c). 6. 
**Attribution Rights.** You must retain, in the Source Code of any Derivative Works that You create, all copyright, patent, or trademark notices from the Source Code of the Original Work, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the Source Code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Original Work. 7. **Warranty of Provenance and Disclaimer of Warranty.** Licensor warrants that the copyright in and to the Original Work and the patent rights granted herein by Licensor are owned by the Licensor or are sublicensed to You under the terms of this License with the permission of the contributor(s) of those copyrights and patent rights. Except as expressly stated in the immediately preceding sentence, the Original Work is provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without limitation, the warranties of non-infringement, merchantability or fitness for a particular purpose. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No license to the Original Work is granted by this License except under this disclaimer. 8. **Limitation of Liability.** Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to anyone for any indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Original Work including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to the extent applicable law prohibits such limitation. 9. **Acceptance and Termination.** If, at any time, You expressly assented to this License, that assent indicates your clear and irrevocable acceptance of this License and all of its terms and conditions. If You distribute or communicate copies of the Original Work or a Derivative Work, You must make a reasonable effort under the circumstances to obtain the express assent of recipients to the terms of this License. This License conditions your rights to undertake the activities listed in Section 1, including your right to create Derivative Works based upon the Original Work, and doing so without honoring these terms and conditions is prohibited by copyright law and international treaty. Nothing in this License is intended to affect copyright exceptions and limitations (including 'fair use' or 'fair dealing'). This License shall terminate immediately and You may no longer exercise any of the rights granted to You by this License upon your failure to honor the conditions in Section 1(c). 10. **Termination for Patent Action.** This License shall terminate automatically and You may no longer exercise any of the rights granted to You by this License as of the date You commence an action, including a cross-claim or counterclaim, against Licensor or any licensee alleging that the Original Work infringes a patent. This termination provision shall not apply for an action alleging patent infringement by combinations of the Original Work with other software or hardware. 11. 
**Jurisdiction, Venue and Governing Law.** Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction excluding its conflict-of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Original Work outside the scope of this License or after its termination shall be subject to the requirements and penalties of copyright or patent law in the appropriate jurisdiction. This section shall survive the termination of this License. 12. **Attorneys' Fees.** In any action to enforce the terms of this License or seeking damages relating thereto, the prevailing party shall be entitled to recover its costs and expenses, including, without limitation, reasonable attorneys' fees and costs incurred in connection with such action, including any appeal of such action. This section shall survive the termination of this License. 13. **Miscellaneous.** If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. 14. **Definition of "You" in This License.** "You" throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 15. **Right to Use.** You may use the Original Work in all ways not otherwise restricted or conditioned by this License or by law, and Licensor promises not to interfere with or be responsible for such uses by You. 16. **Modification of This License.** This License is Copyright © 2007 Zooko Wilcox-O'Hearn. Permission is granted to copy, distribute, or communicate this License without modification. Nothing in this License permits You to modify this License as applied to the Original Work or to Derivative Works. However, You may modify the text of this License and copy, distribute or communicate your modified version (the "Modified License") and apply it to other original works of authorship subject to the following conditions: (i) You may not indicate in any way that your Modified License is the "Transitive Grace Period Public Licence" or "TGPPL" and you may not use those names in the name of your Modified License; and (ii) You must replace the notice specified in the first paragraph above with the notice "Licensed under <insert your license name here>" or with a notice of your own that is not confusingly similar to the notice in this License.
zfec
/zfec-1.5.7.2-cp37-cp37m-win_amd64.whl/zfec-1.5.7.2.dist-info/COPYING.TGPPL.rst
COPYING.TGPPL.rst
# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments

import configparser
import errno
import json
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import Callable, Dict
import functools

have_tomllib = True
if sys.version_info >= (3, 11):
    import tomllib
else:
    try:
        import tomli as tomllib
    except ImportError:
        have_tomllib = False


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to find the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals():
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        pass
    return root


def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
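    # Illustrative example (hypothetical project, not part of upstream
    # versioneer): a setup.cfg section that satisfies this parser looks like
    #
    #   [versioneer]
    #   VCS = git
    #   style = pep440
    #   versionfile_source = src/myproject/_version.py
    #   versionfile_build = myproject/_version.py
    #   tag_prefix = v
    #   parentdir_prefix = myproject-
    #
    # or, equivalently, a [tool.versioneer] table in pyproject.toml with
    # quoted string values.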
    root = Path(root)
    pyproject_toml = root / "pyproject.toml"
    setup_cfg = root / "setup.cfg"
    section = None
    if pyproject_toml.exists() and have_tomllib:
        try:
            with open(pyproject_toml, 'rb') as fobj:
                pp = tomllib.load(fobj)
            section = pp['tool']['versioneer']
        except (tomllib.TOMLDecodeError, KeyError):
            pass
    if not section:
        parser = configparser.ConfigParser()
        with open(setup_cfg) as cfg_file:
            parser.read_file(cfg_file)
        parser.get("versioneer", "VCS")  # raise error if missing
        section = parser["versioneer"]

    cfg = VersioneerConfig()
    cfg.VCS = section['VCS']
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    cfg.tag_prefix = section.get("tag_prefix")
    if cfg.tag_prefix in ("''", '""', None):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode


LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain.
# Generated by versioneer-0.28
# https://github.com/python-versioneer/python-versioneer

"""Git implementation of _version.py."""

import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools


def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver): """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces): """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%%d" %% (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
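# Illustrative note (not part of upstream versioneer): in a 'git archive'
# tarball the export-subst keywords are expanded, so the lines scanned
# below look roughly like
#   git_refnames = " (HEAD -> master, tag: v1.2.3)"
#   git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac"
#   git_date = "2021-03-01 12:00:00 +0100"
# whereas a plain checkout still holds the literal "$Format:...$"
# placeholders, which git_versions_from_keywords() rejects as
# "unexpanded keywords".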
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [versionfile_source] if ipy: files.append(ipy) if "VERSIONEER_PEP518" not in globals(): try: my_path = __file__ if my_path.endswith((".pyc", ".pyo")): my_path = os.path.splitext(my_path)[0] + ".py" versioneer_file = os.path.relpath(my_path) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: with open(".gitattributes", "r") as fobj: for line in fobj: if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True break except OSError: pass if not present: with open(".gitattributes", "a+") as fobj: fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.28) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver): """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces): """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
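# Illustrative note (not from upstream versioneer): each strategy tried
# below returns a dict of the same shape, e.g.
#   {"version": "1.2.3+4.gabcdef0.dirty", "full-revisionid": "abcdef0...",
#    "dirty": True, "error": None, "date": "2021-03-01T12:00:00+0100"}
# and raises NotThisMethod when it cannot answer, so the lookups chain
# from expanded keywords, to the version file, to 'git describe', to the
# parent-directory name.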
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(cmdclass=None): """Get the custom setuptools subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/python-versioneer/python-versioneer/issues/52 cmds = {} if cmdclass is None else cmdclass.copy() # we add "version" to setuptools from setuptools import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? # pip install -e . 
and setuptool/editable_wheel will invoke build_py # but the build_py command is not expected to copy any files. # we override different "build_py" commands for both environments if 'build_py' in cmds: _build_py = cmds['build_py'] else: from setuptools.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) if getattr(self, "editable_mode", False): # During editable installs `.py` and data files are # not copied to build_lib return # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if 'build_ext' in cmds: _build_ext = cmds['build_ext'] else: from setuptools.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_ext.run(self) if self.inplace: # build_ext --inplace will only build extensions in # build/lib<..> dir with no _version.py to write to. # As in place builds will already have a _version.py # in the module dir, we do not need to write one. return # now locate _version.py in the new build/ directory and replace # it with an updated value if not cfg.versionfile_build: return target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) if not os.path.exists(target_versionfile): print(f"Warning: {target_versionfile} does not exist, skipping " "version update. This can happen if you are running build_ext " "without first running build_py.") return print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? 
try: from py2exe.setuptools_buildexe import py2exe as _py2exe except ImportError: from py2exe.distutils_buildexe import py2exe as _py2exe class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # sdist farms its file list building out to egg_info if 'egg_info' in cmds: _egg_info = cmds['egg_info'] else: from setuptools.command.egg_info import egg_info as _egg_info class cmd_egg_info(_egg_info): def find_sources(self): # egg_info.find_sources builds the manifest list and writes it # in one shot super().find_sources() # Modify the filelist and normalize it root = get_root() cfg = get_config_from_root(root) self.filelist.append('versioneer.py') if cfg.versionfile_source: # There are rare cases where versionfile_source might not be # included by default, so we must be explicit self.filelist.append(cfg.versionfile_source) self.filelist.sort() self.filelist.remove_duplicates() # The write method is hidden in the manifest_maker instance that # generated the filelist and was thrown away # We will instead replicate their final normalization (to unicode, # and POSIX-style paths) from setuptools import unicode_utils normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') for f in self.filelist.files] manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') with open(manifest_filename, 'w') as fobj: fobj.write('\n'.join(normalized)) cmds['egg_info'] = cmd_egg_info # we override different "sdist" commands for both environments if 'sdist' in cmds: _sdist = cmds['sdist'] else: from setuptools.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. 
Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ INIT_PY_SNIPPET = """ from . import {0} __version__ = {0}.get_versions()['version'] """ def do_setup(): """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (OSError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except OSError: old = "" module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] snippet = INIT_PY_SNIPPET.format(module) if OLD_SNIPPET in old: print(" replacing boilerplate in %s" % ipy) with open(ipy, "w") as f: f.write(old.replace(OLD_SNIPPET, snippet)) elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors def setup_command(): """Set up Versioneer and exit with appropriate error code.""" errors = do_setup() errors += scan_setup_py() sys.exit(1 if errors else 0) if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": setup_command()
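# Usage sketch (illustrative, not part of versioneer.py itself; "myproject"
# is a placeholder): a project adopting Versioneer wires the two public
# helpers into its setup.py, mirroring the snippet embedded in CONFIG_ERROR
# above:
#
#     import versioneer
#     from setuptools import setup
#
#     setup(
#         name="myproject",
#         version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass(),
#     )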
zfel
/zfel-0.9.4.tar.gz/zfel-0.9.4/versioneer.py
versioneer.py
This work also comes with the added permission that you may combine it with a work licensed under the OpenSSL license (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the OpenSSL license.

This work also comes with the added permission that you may combine it with a work licensed under the Eclipse Public Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Eclipse Public Licence.

This work also comes with the added permission that you may combine it with a work licensed under the Q Public Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Q Public Licence.

This work also comes with the added permission that you may combine it with a work licensed under the Apache Licence (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Apache Licence.

This work also comes with the added permission that you may combine it with a work licensed under the GNU Lesser General Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the GNU Lesser General Public License.

This work also comes with the added permission that you may combine it with a work licensed under the Zope Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Zope Public License.

This work also comes with the added permission that you may combine it with a work licensed under the Python Software Foundation License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Python Software Foundation License.

This work also comes with the added permission that you may combine it with a work licensed under the Academic Free License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Academic Free License.

This work also comes with the added permission that you may combine it with a work licensed under the Apple Public Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Apple Public Source License.

This work also comes with the added permission that you may combine it with a work licensed under the BitTorrent Open Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the BitTorrent Open Source License.

This work also comes with the added permission that you may combine it with a work licensed under the Lucent Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Lucent Public License.

This work also comes with the added permission that you may combine it with a work licensed under the Jabber Open Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Jabber Open Source License.

This work also comes with the added permission that you may combine it with a work licensed under the Common Development and Distribution License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Common Development and Distribution License.

This work also comes with the added permission that you may combine it with a work licensed under the Microsoft Public License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Microsoft Public License.

This work also comes with the added permission that you may combine it with a work licensed under the Microsoft Reciprocal License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Microsoft Reciprocal License.

This work also comes with the added permission that you may combine it with a work licensed under the Sun Industry Standards Source License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Sun Industry Standards Source License.

This work also comes with the added permission that you may combine it with a work licensed under the Open Software License (any version) and distribute the resulting combined work, as long as you follow the requirements of the licences of this work in regard to all of the resulting combined work aside from the work licensed under the Open Software License.

=======================================================
Transitive Grace Period Public Licence ("TGPPL") v. 1.0
=======================================================

This Transitive Grace Period Public Licence (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following licensing notice adjacent to the copyright notice for the Original Work:

*Licensed under the Transitive Grace Period Public Licence version 1.0*

1. **Grant of Copyright License.** Licensor grants You a worldwide, royalty-free, non-exclusive, sublicensable license, for the duration of the copyright, to do the following:

   a. to reproduce the Original Work in copies, either alone or as part of a collective work;

   b. to translate, adapt, alter, transform, modify, or arrange the Original Work, thereby creating derivative works ("Derivative Works") based upon the Original Work;

   c. to distribute or communicate copies of the Original Work and Derivative Works to the public, with the proviso that copies of Original Work or Derivative Works that You distribute or communicate shall be licensed under this Transitive Grace Period Public Licence no later than 12 months after You distributed or communicated said copies;

   d. to perform the Original Work publicly; and

   e. to display the Original Work publicly.

2. **Grant of Patent License.** Licensor grants You a worldwide, royalty-free, non-exclusive, sublicensable license, under patent claims owned or controlled by the Licensor that are embodied in the Original Work as furnished by the Licensor, for the duration of the patents, to make, use, sell, offer for sale, have made, and import the Original Work and Derivative Works.

3. **Grant of Source Code License.** The term "Source Code" means the preferred form of the Original Work for making modifications to it and all available documentation describing how to modify the Original Work. Licensor agrees to provide a machine-readable copy of the Source Code of the Original Work along with each copy of the Original Work that Licensor distributes. Licensor reserves the right to satisfy this obligation by placing a machine-readable copy of the Source Code in an information repository reasonably calculated to permit inexpensive and convenient access by You for as long as Licensor continues to distribute the Original Work.

4. **Exclusions From License Grant.** Neither the names of Licensor, nor the names of any contributors to the Original Work, nor any of their trademarks or service marks, may be used to endorse or promote products derived from this Original Work without express prior permission of the Licensor. Except as expressly stated herein, nothing in this License grants any license to Licensor's trademarks, copyrights, patents, trade secrets or any other intellectual property. No patent license is granted to make, use, sell, offer for sale, have made, or import embodiments of any patent claims other than the licensed claims defined in Section 2. No license is granted to the trademarks of Licensor even if such marks are included in the Original Work. Nothing in this License shall be interpreted to prohibit Licensor from licensing under terms different from this License any Original Work that Licensor otherwise would have a right to license.

5. **External Deployment.** The term "External Deployment" means the use, distribution, or communication of the Original Work or Derivative Works in any way such that the Original Work or Derivative Works may be used by anyone other than You, whether those works are distributed or communicated to those persons or made available as an application intended for use over a network. As an express condition for the grants of license hereunder, You must treat any External Deployment by You of the Original Work or a Derivative Work as a distribution under section 1(c).

6. **Attribution Rights.** You must retain, in the Source Code of any Derivative Works that You create, all copyright, patent, or trademark notices from the Source Code of the Original Work, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the Source Code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Original Work.

7. **Warranty of Provenance and Disclaimer of Warranty.** Licensor warrants that the copyright in and to the Original Work and the patent rights granted herein by Licensor are owned by the Licensor or are sublicensed to You under the terms of this License with the permission of the contributor(s) of those copyrights and patent rights. Except as expressly stated in the immediately preceding sentence, the Original Work is provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without limitation, the warranties of non-infringement, merchantability or fitness for a particular purpose. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No license to the Original Work is granted by this License except under this disclaimer.

8. **Limitation of Liability.** Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to anyone for any indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Original Work including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to the extent applicable law prohibits such limitation.

9. **Acceptance and Termination.** If, at any time, You expressly assented to this License, that assent indicates your clear and irrevocable acceptance of this License and all of its terms and conditions. If You distribute or communicate copies of the Original Work or a Derivative Work, You must make a reasonable effort under the circumstances to obtain the express assent of recipients to the terms of this License. This License conditions your rights to undertake the activities listed in Section 1, including your right to create Derivative Works based upon the Original Work, and doing so without honoring these terms and conditions is prohibited by copyright law and international treaty. Nothing in this License is intended to affect copyright exceptions and limitations (including 'fair use' or 'fair dealing'). This License shall terminate immediately and You may no longer exercise any of the rights granted to You by this License upon your failure to honor the conditions in Section 1(c).

10. **Termination for Patent Action.** This License shall terminate automatically and You may no longer exercise any of the rights granted to You by this License as of the date You commence an action, including a cross-claim or counterclaim, against Licensor or any licensee alleging that the Original Work infringes a patent. This termination provision shall not apply for an action alleging patent infringement by combinations of the Original Work with other software or hardware.

11. **Jurisdiction, Venue and Governing Law.** Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction excluding its conflict-of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Original Work outside the scope of this License or after its termination shall be subject to the requirements and penalties of copyright or patent law in the appropriate jurisdiction. This section shall survive the termination of this License.

12. **Attorneys' Fees.** In any action to enforce the terms of this License or seeking damages relating thereto, the prevailing party shall be entitled to recover its costs and expenses, including, without limitation, reasonable attorneys' fees and costs incurred in connection with such action, including any appeal of such action. This section shall survive the termination of this License.

13. **Miscellaneous.** If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable.

14. **Definition of "You" in This License.** "You" throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

15. **Right to Use.** You may use the Original Work in all ways not otherwise restricted or conditioned by this License or by law, and Licensor promises not to interfere with or be responsible for such uses by You.

16. **Modification of This License.** This License is Copyright © 2007 Zooko Wilcox-O'Hearn. Permission is granted to copy, distribute, or communicate this License without modification. Nothing in this License permits You to modify this License as applied to the Original Work or to Derivative Works. However, You may modify the text of this License and copy, distribute or communicate your modified version (the "Modified License") and apply it to other original works of authorship subject to the following conditions: (i) You may not indicate in any way that your Modified License is the "Transitive Grace Period Public Licence" or "TGPPL" and you may not use those names in the name of your Modified License; and (ii) You must replace the notice specified in the first paragraph above with the notice "Licensed under <insert your license name here>" or with a notice of your own that is not confusingly similar to the notice in this License.
zfex
/zfex-1.6.0.1-cp39-cp39-win32.whl/zfex-1.6.0.1.dist-info/COPYING.TGPPL.rst
COPYING.TGPPL.rst
# ZHDK

Our mission at [Zama](https://zama.ai) is to protect people’s privacy by preventing data breaches and unethical surveillance. Following a recent breakthrough in homomorphic encryption, we are now building a ZeroTrust deep learning framework that enables fast and accurate inference over encrypted data, with minimal performance overhead, no changes to the network architecture and no retraining necessary.

[Zama](https://zama.ai) is open-source by design, as we believe privacy-enabling technologies should benefit the widest possible community of developers and researchers.

# Warning

This package is not a working package.

# Test

```python
import zfhe

zfhe.hello_world()
```
zfhe
/zfhe-0.0.2.tar.gz/zfhe-0.0.2/README.md
README.md
import contextlib
import inspect
import json
import os
import pickle
from logging import INFO, StreamHandler, getLogger
from typing import Callable, Dict, Generator, List, Optional, Tuple, Union

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scienceplots
from joblib import Parallel, delayed
from scipy.optimize import OptimizeResult
from tqdm.auto import tqdm

from zfista.metrics import (
    calculate_metrics,
    extract_function_values,
    extract_non_dominated_points,
    spread_metrics,
)
from zfista.problems import (
    FDS,
    JOS1,
    SD,
    TOI4,
    TRIDIA,
    ZDT1,
    LinearFunctionRank1,
    Problem,
)

logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(handler)

plt.style.use(["science", "bright"])


@contextlib.contextmanager
def tqdm_joblib(total: Optional[int] = None, **kwargs) -> Generator:
    pbar = tqdm(total=total, miniters=1, smoothing=0, **kwargs)

    class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
        def __call__(self, *args, **kwargs):
            pbar.update(n=self.batch_size)
            return super().__call__(*args, **kwargs)

    old_batch_callback = joblib.parallel.BatchCompletionCallBack
    joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
    try:
        yield pbar
    finally:
        joblib.parallel.BatchCompletionCallBack = old_batch_callback
        pbar.close()


def create_directory(problem: Problem, experiment_name: str) -> str:
    directory = os.path.join("results", experiment_name, problem.name)
    os.makedirs(directory, exist_ok=True)
    return directory


def show_Pareto_front(
    res_normal: List[OptimizeResult],
    res_acc: List[OptimizeResult],
    res_acc_deprecated: List[OptimizeResult],
    iters: int = 10,
    s: float = 15,
    alpha: float = 0.75,
    elev: float = 15,
    azim: float = 130,
    linewidth: float = 0.1,
    fname: Optional[str] = None,
) -> None:
    if len(res_normal[0].fun) > 3:
        return
    F_normal = extract_function_values(res_normal)
    F_acc = extract_function_values(res_acc)
    F_acc_deprecated = extract_function_values(res_acc_deprecated)
    F_0 = np.array([res.allfuns[0] for res in res_normal])
    normal_color = "#6536FF"
    acc_color = "#e74c3c"
    acc_dep_color = "#3cc756"
    initial_color = "#8e44ad"
    common_style = {"s": s, "alpha": alpha, "linewidth": linewidth}
    fig = plt.figure(figsize=(7.5, 7.5), dpi=100)
    if len(res_normal[0].fun) == 2:
        ax = fig.add_subplot(111)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
        ax.scatter(F_0[:, 0], F_0[:, 1], label="Initial point", marker="x",
                   color=initial_color, **common_style)
        ax.scatter(F_normal[:, 0], F_normal[:, 1], label="Normal", marker="o",
                   color=normal_color, **common_style)
        ax.scatter(F_acc[:, 0], F_acc[:, 1], label="Accelerated", marker="^",
                   color=acc_color, **common_style)
        ax.scatter(F_acc_deprecated[:, 0], F_acc_deprecated[:, 1],
                   label="Accelerated (without $f_i(y^k) - F_i(x^k)$)",
                   marker="s", color=acc_dep_color, **common_style)
        F_iter_normal = np.array(
            [res.allfuns[iters] for res in res_normal if res.nit >= iters]
        )
        F_iter_acc = np.array(
            [res.allfuns[iters] for res in res_acc if res.nit >= iters]
        )
        F_iter_acc_deprecated = np.array(
            [res.allfuns[iters] for res in res_acc_deprecated if res.nit >= iters]
        )
        if len(F_iter_normal) > 0:
            ax.scatter(F_iter_normal[:, 0], F_iter_normal[:, 1],
                       label=f"Normal ({iters} iters)", marker="o",
                       edgecolors=normal_color, facecolors="none", **common_style)
        if len(F_iter_acc) > 0:
            ax.scatter(F_iter_acc[:, 0], F_iter_acc[:, 1],
                       label=f"Accelerated ({iters} iters)", marker="^",
                       edgecolors=acc_color, facecolors="none", **common_style)
        if len(F_iter_acc_deprecated) > 0:
            ax.scatter(F_iter_acc_deprecated[:, 0], F_iter_acc_deprecated[:, 1],
                       label=f"Accelerated (without $f_i(y^k) - F_i(x^k)$, {iters} iters)",
                       marker="s", edgecolors=acc_dep_color, facecolors="none",
                       **common_style)
        ax.set_xlabel("$F_1$")
        ax.set_ylabel("$F_2$")
        ax.legend()
    elif len(res_normal[0].fun) == 3:
        ax = fig.add_subplot(111, projection="3d")
        ax.view_init(elev=elev, azim=azim)
        ax.scatter(F_normal[:, 0], F_normal[:, 1], F_normal[:, 2],
                   label="Normal", marker="o", color=normal_color, **common_style)
        ax.scatter(F_acc[:, 0], F_acc[:, 1], F_acc[:, 2],
                   label="Accelerated", marker="^", color=acc_color, **common_style)
        ax.scatter(F_acc_deprecated[:, 0], F_acc_deprecated[:, 1],
                   F_acc_deprecated[:, 2],
                   label="Accelerated (without $f_i(y^k) - F_i(x^k)$)",
                   marker="s", color=acc_dep_color, **common_style)
        ax.set_xlabel("$F_1$")
        ax.set_ylabel("$F_2$")
        ax.set_zlabel("$F_3$")
        ax.legend()
    if fname is not None:
        plt.savefig(fname, bbox_inches="tight")
    else:
        plt.show()
    plt.close()


def show_error_decay(
    res_normal: OptimizeResult,
    res_acc: OptimizeResult,
    res_acc_deprecated: OptimizeResult,
    fname: Optional[str] = None,
):
    normal_color = "#6536FF"
    acc_color = "#e74c3c"
    acc_dep_color = "#3cc756"
    plt.figure(figsize=(7.5, 7.5), dpi=100)
    plt.yscale("log")
    plt.xlabel(r"$k$")
    plt.ylabel(r"$\|x^k - y^k\|_\infty$")
    plt.plot(res_normal.allerrs, label="Normal", color=normal_color,
             linestyle="dashed")
    plt.plot(res_acc.allerrs, label="Accelerated", color=acc_color)
    plt.plot(res_acc_deprecated.allerrs,
             label="Accelerated (without $f_i(y^k) - F_i(x^k)$)",
             color=acc_dep_color, linestyle="dotted")
    plt.legend()
    if fname is not None:
        plt.savefig(fname, bbox_inches="tight")
    else:
        plt.show()
    plt.close()


def save_results(
    problem: Problem,
    experiment_name: str,
    res_normal: List[OptimizeResult],
    res_acc: List[OptimizeResult],
    res_acc_deprecated: List[OptimizeResult],
    metrics: Dict[str, Dict[str, float]],
) -> None:
    logger.info("Saving results...")
    directory = create_directory(problem, experiment_name)
    with open(os.path.join(directory, "metrics.json"), "w") as f:
        json.dump(metrics, f, indent=4)
    show_Pareto_front(
        res_normal,
        res_acc,
        res_acc_deprecated,
        fname=os.path.join(directory, "pareto_front.pdf"),
    )
    show_error_decay(
        res_normal[0],
        res_acc[0],
        res_acc_deprecated[0],
        fname=os.path.join(directory, "error_decay.pdf"),
    )
    logger.info("Results saved.")


def load_or_run_results(
    file_name: str,
    directory: str,
    overwrite: bool,
    run_fn: Callable,
) -> List[OptimizeResult]:
    if not overwrite and os.path.exists(os.path.join(directory, file_name)):
        try:
            logger.info(f"Loading {file_name}...")
            with open(os.path.join(directory, file_name), "rb") as f:
                results = pickle.load(f)
        except Exception as e:
            logger.warning(f"Failed to load {file_name} due to: {e}")
            results = run_fn()
            with open(os.path.join(directory, file_name), "wb") as f:
                pickle.dump(results, f)
    else:
        logger.info(f"Running {file_name}...")
        results = run_fn()
        with open(os.path.join(directory, file_name), "wb") as f:
            pickle.dump(results, f)
    return results


def benchmark(
    problem: Problem,
    experiment_name: str,
    low: Union[float, np.ndarray],
    high: Union[float, np.ndarray],
    n_samples: int = 100,
    overwrite: bool = False,
    max_iter: int = 100000000,
    verbose: bool = False,
) -> Tuple[List[OptimizeResult], List[OptimizeResult], List[OptimizeResult]]:
    directory = create_directory(problem, experiment_name)
    initial_points = np.random.uniform(
        low=low, high=high, size=(n_samples, problem.n_features)
    )
    with tqdm_joblib(total=n_samples, desc="Normal") as progress_bar:
        res_normal = load_or_run_results(
            "normal_results.pkl",
            directory,
            overwrite,
            lambda: Parallel(n_jobs=-1)(
                delayed(problem.minimize_proximal_gradient)(
                    x0,
                    return_all=True,
                    max_iter=max_iter,
                    max_iter_internal=max_iter,
                    verbose=verbose,
                )
                for x0 in initial_points
            ),
        )
    with tqdm_joblib(total=n_samples, desc="Accelerated") as progress_bar:
        res_acc = load_or_run_results(
            "accelerated_results.pkl",
            directory,
            overwrite,
            lambda: Parallel(n_jobs=-1)(
                delayed(problem.minimize_proximal_gradient)(
                    x0,
                    nesterov=True,
                    return_all=True,
                    max_iter=max_iter,
                    max_iter_internal=max_iter,
                    verbose=verbose,
                )
                for x0 in initial_points
            ),
        )
    with tqdm_joblib(
        total=n_samples, desc="Accelerated (without $f_i(y^k) - F_i(x^k)$)"
    ) as progress_bar:
        res_acc_deprecated = load_or_run_results(
            "accelerated_results_deprecated.pkl",
            directory,
            overwrite,
            lambda: Parallel(n_jobs=-1)(
                delayed(problem.minimize_proximal_gradient)(
                    x0,
                    nesterov=True,
                    return_all=True,
                    deprecated=True,
                    max_iter=max_iter,
                    max_iter_internal=max_iter,
                    verbose=verbose,
                )
                for x0 in initial_points
            ),
        )
    return res_normal, res_acc, res_acc_deprecated


def generate_performance_profiles(
    performance_ratios: Dict[str, Dict[str, List[float]]]
) -> Dict[str, Dict[str, Tuple[np.ndarray, np.ndarray]]]:
    performance_profiles: Dict[str, Dict[str, Tuple[np.ndarray, np.ndarray]]] = {}
    for ratio_key, algorithm_ratios in performance_ratios.items():
        performance_profiles[ratio_key] = {}
        for algorithm, ratios in algorithm_ratios.items():
            thresholds = []
            percentages = []
            for i, ratio in enumerate(sorted(ratios)):
                thresholds.append(ratio)
                percentages.append((i + 1) / len(ratios))
            performance_profiles[ratio_key][algorithm] = (
                np.array(thresholds),
                np.array(percentages),
            )
    return performance_profiles


def plot_performance_profiles(
    metric_key: str,
    algorithm_profiles: Dict[str, Tuple[np.ndarray, np.ndarray]],
    fname: Optional[str],
) -> None:
    plt.figure(figsize=(7.5, 7.5), dpi=100)
    plt.xlabel("Threshold")
    plt.ylabel("Percentage of Problems")
    for algorithm, profile in algorithm_profiles.items():
        thresholds, percentages = profile
        plt.step(thresholds, percentages, label=algorithm)
    plt.legend()
    if fname is not None:
        plt.savefig(fname, bbox_inches="tight")
    else:
        plt.show()
    plt.close()


def main(overwrite=False, verbose=False) -> None:
    n_features_list = [5, 10, 20, 50, 100, 200, 500, 1000]
    problem_classes = [
        JOS1,
        FDS,
        SD,
        ZDT1,
        TOI4,
        TRIDIA,
        LinearFunctionRank1,
    ]
    problems = []
    for problem_class in problem_classes:
        constructor_params = inspect.signature(problem_class.__init__).parameters  # type: ignore
        if "n_features" in constructor_params:
            for n_features in n_features_list:
                problem = problem_class(n_features=n_features)
                problems.append(problem)
                if (
                    "l1_ratios" in constructor_params
                    and "l1_shifts" in constructor_params
                ):
                    n_objectives = problem.n_objectives
                    l1_ratios = (np.arange(n_objectives) + 1) / n_features
                    l1_shifts = np.arange(n_objectives)
                    problems.append(
                        problem_class(
                            n_features=n_features,
                            l1_ratios=l1_ratios,
                            l1_shifts=l1_shifts,
                        )
                    )
        else:
            problem = problem_class()
            problems.append(problem)
            if "l1_ratios" in constructor_params and "l1_shifts" in constructor_params:
                n_features = problem.n_features
                n_objectives = problem.n_objectives
                l1_ratios = (np.arange(n_objectives) + 1) / n_features
                l1_shifts = np.arange(n_objectives)
                problems.append(problem_class(l1_ratios=l1_ratios, l1_shifts=l1_shifts))
    experiment_name = "proximal_vs_accelerated_proximal"
    problem_parameters = {
        "JOS1": {"low": -2, "high": 4},
        "FDS": {"low": -2, "high": 2},
        "SD": {"low": [1, np.sqrt(2), np.sqrt(2), 1], "high": [3, 3, 3, 3]},
        "ZDT1": {"low": 0, "high": 0.01},
        "TOI4": {"low": -2, "high": 5},
        "TRIDIA": {"low": -1, "high": 1},
        "LinearFunctionRank1": {"low": -1, "high": 1},
    }
    performance_ratios: Dict[str, Dict[str, List[float]]] = {}
    df_rows = []
    for problem in problems:
        logger.info(f"Running benchmark for {problem.name}...")
        problem_params = problem_parameters.get(type(problem).__name__)
        low, high = problem_params.get("low"), problem_params.get("high")  # type: ignore
        res_normal, res_acc, res_acc_deprecated = benchmark(
            problem,
            experiment_name,
            low,
            high,
            overwrite=overwrite,
            verbose=verbose,
        )
        metrics, ratios = calculate_metrics(
            ("Normal", res_normal),
            ("Accelerated", res_acc),
            ("Accelerated (without $f_i(y^k) - F_i(x^k)$)", res_acc_deprecated),
        )
        # Add metrics to dataframe
        for metric_key, algorithms_metrics in metrics.items():
            df_rows.extend(
                [
                    {
                        "problem": problem.name,
                        "algorithm": algorithm,
                        "metric": metric_key,
                        "value": metric,
                    }
                    for algorithm, metric in algorithms_metrics.items()
                ]
            )
        for ratio_key, algorithms_ratios in ratios.items():
            if ratio_key not in performance_ratios:
                performance_ratios[ratio_key] = {}
            for algorithm, ratio in algorithms_ratios.items():
                if algorithm not in performance_ratios[ratio_key]:
                    performance_ratios[ratio_key][algorithm] = []
                performance_ratios[ratio_key][algorithm].append(ratio)
        save_results(
            problem, experiment_name, res_normal, res_acc, res_acc_deprecated, metrics
        )
        logger.info(f"Benchmark completed for {problem.name}.")
    performance_profiles = generate_performance_profiles(performance_ratios)
    for ratio_key, algorithm_profiles in performance_profiles.items():
        logger.info(f"Plotting performance profile for {ratio_key}...")
        plot_performance_profiles(
            ratio_key,
            algorithm_profiles,
            os.path.join("results", experiment_name, f"{ratio_key}.pdf"),
        )
    # Save metrics to csv
    df = pd.concat(
        [pd.DataFrame(row, index=[0]) for row in df_rows], ignore_index=True
    )
    df.columns = ["problem", "algorithm", "metric", "value"]
    df.to_csv(os.path.join("results", experiment_name, "metrics.csv"), index=False)
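# Entry-point sketch (an assumption; the published module defines main() but
# no CLI guard): a minimal argparse wrapper exposing main()'s two keyword
# arguments could look like this.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Run the zfista benchmarks.")
    parser.add_argument("--overwrite", action="store_true",
                        help="Re-run the solvers even if pickled results exist.")
    parser.add_argument("--verbose", action="store_true",
                        help="Pass verbose=True through to the solvers.")
    args = parser.parse_args()
    main(overwrite=args.overwrite, verbose=args.verbose)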
zfista
/zfista-0.0.2-py3-none-any.whl/benchmarks/benchmark.py
benchmark.py
========
Overview
========

.. start-badges

.. list-table::
    :stub-columns: 1

    * - docs
      - |docs|
    * - tests
      - | |travis|
        | |coveralls| |codecov|
        | |codacy|
    * - package
      - | |version| |wheel| |supported-versions| |supported-implementations|
        | |commits-since|

.. |docs| image:: https://readthedocs.org/projects/zfit-interface/badge/?style=flat
    :target: https://readthedocs.org/projects/zfit-interface
    :alt: Documentation Status

.. |travis| image:: https://api.travis-ci.org/zfit/zfit-interface.svg?branch=master
    :alt: Travis-CI Build Status
    :target: https://travis-ci.org/zfit/zfit-interface

.. |coveralls| image:: https://coveralls.io/repos/zfit/zfit-interface/badge.svg?branch=master&service=github
    :alt: Coverage Status
    :target: https://coveralls.io/r/zfit/zfit-interface

.. |codecov| image:: https://codecov.io/github/zfit/zfit-interface/coverage.svg?branch=master
    :alt: Coverage Status
    :target: https://codecov.io/github/zfit/zfit-interface

.. |codacy| image:: https://img.shields.io/codacy/grade/[Get ID from https://app.codacy.com/app/zfit/zfit-interface/settings].svg
    :target: https://www.codacy.com/app/zfit/zfit-interface
    :alt: Codacy Code Quality Status

.. |version| image:: https://img.shields.io/pypi/v/zfit-interface.svg
    :alt: PyPI Package latest release
    :target: https://pypi.org/project/zfit-interface

.. |wheel| image:: https://img.shields.io/pypi/wheel/zfit-interface.svg
    :alt: PyPI Wheel
    :target: https://pypi.org/project/zfit-interface

.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/zfit-interface.svg
    :alt: Supported versions
    :target: https://pypi.org/project/zfit-interface

.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/zfit-interface.svg
    :alt: Supported implementations
    :target: https://pypi.org/project/zfit-interface

.. |commits-since| image:: https://img.shields.io/github/commits-since/zfit/zfit-interface/v0.0.1.svg
    :alt: Commits since latest release
    :target: https://github.com/zfit/zfit-interface/compare/v0.0.1...master

.. end-badges

zfit model fitting interface for HEP

* Free software: BSD 3-Clause License

Installation
============

::

    pip install zfit-interface

You can also install the in-development version with::

    pip install https://github.com/zfit/zfit-interface/archive/master.zip

Documentation
=============

https://zfit-interface.readthedocs.io/

Development
===========

To run all the tests run::

    tox

Note, to combine the coverage data from all the tox environments run:

.. list-table::
    :widths: 10 90
    :stub-columns: 1

    * - Windows
      - ::

            set PYTEST_ADDOPTS=--cov-append
            tox

    * - Other
      - ::

            PYTEST_ADDOPTS=--cov-append tox
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/README.rst
README.rst
============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given.

Bug reports
===========

When `reporting a bug <https://github.com/zfit/zfit-interface/issues>`_ please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Documentation improvements
==========================

zfit-interface could always use more documentation, whether as part of the official zfit-interface docs, in docstrings, or even on the web in blog posts, articles, and such.

Feature requests and feedback
=============================

The best way to send feedback is to file an issue at https://github.com/zfit/zfit-interface/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that code contributions are welcome :)

Development
===========

To set up `zfit-interface` for local development:

1. Fork `zfit-interface <https://github.com/zfit/zfit-interface>`_ (look for the "Fork" button).
2. Clone your fork locally::

    git clone git@github.com:zfit/zfit-interface.git

3. Create a branch for local development::

    git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.

4. When you're done making changes, run all the checks and the docs builder with `tox <https://tox.readthedocs.io/en/latest/install.html>`_ in one command::

    tox

5. Commit your changes and push your branch to GitHub::

    git add .
    git commit -m "Your detailed description of your changes."
    git push origin name-of-your-bugfix-or-feature

6. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

If you need some code review or feedback while you're developing the code, just make the pull request.

For merging, you should:

1. Include passing tests (run ``tox``) [1]_.
2. Update documentation when there's new API, functionality etc.
3. Add a note to ``CHANGELOG.rst`` about the changes.
4. Add yourself to ``AUTHORS.rst``.

.. [1] If you don't have all the necessary python versions available locally you can rely on Travis - it will
       `run the tests <https://travis-ci.org/zfit/zfit-interface/pull_requests>`_ for each change you add in the pull request.

       It will be slower though ...

Tips
----

To run a subset of tests::

    tox -e envname -- pytest -k test_myfeature

To run all the test environments in *parallel* (you need to ``pip install detox``)::

    detox
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/CONTRIBUTING.rst
CONTRIBUTING.rst
from zfit_interface.func import ZfitFunc
from zfit_interface.typing import ArrayLike
from zfit_interface.typing import LimitsInputType
from zfit_interface.typing import NormInputType
from zfit_interface.typing import OptionsInputType
from zfit_interface.typing import VarInputType
from zfit_interface.variables import ZfitComposedVariable
from zfit_interface.variables import ZfitSpace


class ZfitPDF(ZfitFunc):
    def pdf(
        self, var: VarInputType, norm: NormInputType = None, *, options=None
    ) -> ArrayLike:
        """Probability density of the defined function, normalized over *norm* to 1."""
        raise NotImplementedError

    def ext_pdf(
        self, var: VarInputType, norm: NormInputType = None, *, options=None
    ) -> ArrayLike:
        """Probability density of the defined function, normalized over *norm* to the yield.

        This method is only available for extended PDFs.
        """
        raise NotImplementedError

    def counts(
        self, var: VarInputType, norm: NormInputType, *, options=None
    ) -> ArrayLike:
        """Counts of a histogram, corresponds to the integral over *ext_pdf* for the bin edges.

        This method corresponds to the "frequency" of a histogram.

        This method is only available for extended PDFs.
        """
        raise NotImplementedError

    def rel_counts(
        self, var: VarInputType, norm: NormInputType, *, options=None
    ) -> ArrayLike:
        """Relative counts of a histogram, corresponds to the integral over *pdf* for the bin edges.

        This method corresponds to the "relative frequency" of a histogram.
        """
        raise NotImplementedError

    def integrate(
        self,
        limits: LimitsInputType = None,
        *,
        norm: NormInputType = None,
        var: VarInputType = None,
        options: OptionsInputType = None,
    ) -> ArrayLike:
        """Integrate (analytically, otherwise numerically) over *limits*."""
        raise NotImplementedError

    def ext_integrate(self, limits=None, *, norm=None, var=None, options=None):
        raise NotImplementedError

    def sample(self, n=None, limits=None, *, var=None, options=None):
        raise NotImplementedError

    @property
    def is_extended(self) -> bool:
        raise NotImplementedError

    def get_yield(self) -> ZfitComposedVariable:
        raise NotImplementedError

    @property
    def space(self) -> ZfitSpace:
        raise NotImplementedError

    @property
    def norm(self) -> ZfitSpace:
        raise NotImplementedError


class Integral:
    @classmethod
    def register_integral(cls, limits, func):
        raise NotImplementedError

    @classmethod
    def register_cdf(cls, upper, func):
        raise NotImplementedError


class Sampling:
    @classmethod
    def register_inverse_integral(cls, limits, func):
        raise NotImplementedError

    @classmethod
    def register_icdf(cls, upper, func):
        raise NotImplementedError
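# Illustrative sketch (hypothetical, not part of this interface module): a
# concrete PDF subclasses ZfitPDF and fills in the hooks above. Per the
# docstrings, `ext_pdf` is the same density normalized to the yield rather
# than to 1, so a minimal extended PDF could delegate to `pdf` and scale:
class _SketchExtendedPDF(ZfitPDF):
    def __init__(self, yield_value: float):
        self._yield = yield_value

    def pdf(self, var, norm=None, *, options=None) -> ArrayLike:
        raise NotImplementedError  # a real PDF evaluates its density here

    def ext_pdf(self, var, norm=None, *, options=None) -> ArrayLike:
        # scaled so that it normalizes to the yield instead of 1
        return self._yield * self.pdf(var, norm, options=options)

    @property
    def is_extended(self) -> bool:
        return True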
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/zfit_interface/pdf.py
pdf.py
from __future__ import annotations

import typing
from collections.abc import Iterable
from typing import TypeVar

# NOTE: the module defines its own PlottableTraits below, which shadows this
# import; both are kept as in the original interface definition.
from uhi.typing.plottable import PlottableTraits

import zfit_interface.typing as ztyping

# `typing` provides no generic `T`; define the type variable used by
# ZfitBinning.__getitem__ and __iter__ explicitly.
T = TypeVar("T")


class ZfitVar:
    @property
    def name(self) -> str:
        raise NotImplementedError

    @property
    def label(self) -> str:
        raise NotImplementedError

    @property
    def binning(self):
        raise NotImplementedError

    @property
    def is_binned(self):
        raise NotImplementedError


class ZfitBinning:
    @property
    def traits(self) -> PlottableTraits:
        ...

    def __getitem__(self, index: int) -> T:
        """Get the pair of edges (not discrete) or bin label (discrete)."""

    def __len__(self) -> int:
        """Return the number of bins (not counting flow bins, which are
        ignored for this Protocol currently)."""

    def __eq__(self, other: typing.Any) -> bool:
        """Required to be sequence-like."""

    def __iter__(self) -> typing.Iterator[T]:
        """Useful element of a Sequence to include."""


class PlottableTraits:
    __slots__ = ["_circular", "_discrete"]

    def __init__(self, circular: bool | None = None, discrete: bool | None = None):
        self._circular = False if circular is None else circular
        self._discrete = False if discrete is None else discrete

    @property
    def circular(self) -> bool:
        """True if the axis "wraps around"."""
        return self._circular

    @property
    def discrete(self) -> bool:
        """True if each bin is discrete - Integer, Boolean, or Category, for example."""
        return self._discrete

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, PlottableTraits):
            return NotImplemented
        return not (self.circular ^ o.circular or self.discrete ^ o.discrete)


continuous_trait = PlottableTraits(circular=False, discrete=False)


class BaseBinning(ZfitBinning):
    def __init__(self, traits: PlottableTraits = None, **kwargs) -> None:
        super().__init__(**kwargs)
        # was `traits._traits = ...`, which crashes for traits=None and never
        # sets the attribute read back by the `traits` property below
        self._traits = traits if traits is not None else continuous_trait

    @property
    def traits(self) -> PlottableTraits:
        return self._traits


class BaseVar(ZfitVar):
    def __init__(self, name: str, binning: ZfitBinning = None, label: str = None):
        self._name = name
        self._label = label
        self._binning = self._check_input_init_binning(binning)

    @property
    def name(self) -> str:
        return self._name

    @property
    def label(self) -> str:
        return self._label or self.name

    @property
    def is_binned(self):
        return self.binning is not None

    @property
    def binning(self):
        return self._binning

    def __repr__(self) -> str:
        return f"<Var {self.label}, binned={self.binning}>"

    def _check_input_init_binning(self, binning):
        return binning


class ZfitVars:
    @property
    def names(self) -> Iterable[str]:
        raise NotImplementedError

    @property
    def labels(self) -> Iterable[str]:
        raise NotImplementedError

    @property
    def binnings(self):
        raise NotImplementedError


class ZfitAxis(ZfitVar):
    @property
    def lower(self) -> ztyping.RealScalar:
        raise NotImplementedError

    @property
    def upper(self) -> ztyping.RealScalar:
        raise NotImplementedError


class ZfitSpace(ZfitVar):  # TODO: how to name things?
    @property
    def lower(self) -> ztyping.RealScalar:
        raise NotImplementedError

    @property
    def upper(self) -> ztyping.RealScalar:
        raise NotImplementedError


class ZfitParam(ZfitVar):
    """A parameter is primarily used to communicate intentions in two cases:

    1. for a minimizer. It tells the minimizer what the initial value is,
       what the limits are and the stepsize; also whether it is considered
       to be floating in the fit or fixed.
    2. for a model. It is an unbinned variable that should be regarded, in
       the default case, as
    """

    def value(self) -> ztyping.RealScalar:
        raise NotImplementedError

    @property
    def limit(self) -> tuple[ztyping.RealScalar, ztyping.RealScalar]:
        return self.lower, self.upper

    # @property
    # def lower(self) -> ztyping.RealScalar:
    #     raise NotImplementedError
    #
    # @property
    # def upper(self) -> ztyping.RealScalar:
    #     raise NotImplementedError

    @property
    def stepsize(self) -> ztyping.RealScalar:
        raise NotImplementedError

    @property
    def floating(self) -> bool:
        raise NotImplementedError


class ZfitParams(ZfitVars):
    def values(self) -> Iterable[ztyping.ArrayLike]:
        raise NotImplementedError

    @property
    def limits(self) -> tuple[ztyping.ArrayLike, ztyping.ArrayLike]:
        return self.lower, self.upper

    # @property
    # def lower(self) -> ztyping.RealScalar:
    #     raise NotImplementedError
    #
    # @property
    # def upper(self) -> ztyping.RealScalar:
    #     raise NotImplementedError

    @property
    def stepsizes(self) -> ztyping.ArrayLike:
        raise NotImplementedError

    @property
    def floating(self) -> ztyping.ArrayLike:
        raise NotImplementedError


class ZfitComposedVariable(ZfitVar):
    pass
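# Example (illustrative, runnable): the XOR-based __eq__ above compares the
# traits field-by-field, so a default-constructed PlottableTraits is equal
# to continuous_trait.
if __name__ == "__main__":
    assert PlottableTraits() == continuous_trait
    assert PlottableTraits(discrete=True) != continuous_trait
    print("trait comparison examples passed")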
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/zfit_interface/variables.py
variables.py
from abc import abstractmethod
from typing import Callable, Dict, List, Union

# The imports below were missing in the original file; the module paths
# follow the conventions of the package's other files (see result.py and
# variables.py).
import zfit_interface.typing as ztyping
from zfit_interface.old.param import ZfitParameter


class ZfitModel:
    @abstractmethod
    def update_integration_options(
        self, *args, **kwargs
    ):  # TODO: handling integration properly
        raise NotImplementedError

    @abstractmethod
    def integrate(
        self,
        limits: ztyping.LimitsType,
        norm_range: ztyping.LimitsType = None,
        name: str = "integrate",
    ) -> ztyping.XType:
        """Integrate the function over `limits` (normalized over `norm_range` if not False).

        Args:
            limits: the limits to integrate over
            norm_range: the limits to normalize over or False to integrate
                the unnormalized probability
            name:

        Returns:
            The integral value
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def register_analytic_integral(
        cls,
        func: Callable,
        limits: ztyping.LimitsType = None,
        priority: int = 50,
        *,
        supports_norm_range: bool = False,
        supports_multiple_limits: bool = False,
    ):
        """Register an analytic integral with the class.

        Args:
            func:
            limits: |limits_arg_descr|
            priority:
            supports_multiple_limits:
            supports_norm_range:

        Returns:
        """
        raise NotImplementedError

    @abstractmethod
    def partial_integrate(
        self,
        x: ztyping.XType,
        limits: ztyping.LimitsType,
        norm_range: ztyping.LimitsType = None,
    ) -> ztyping.XType:
        """Partially integrate the function over the `limits` and evaluate it at `x`.

        Dimension of `limits` and `x` have to add up to the full dimension
        and be therefore equal to the dimensions of `norm_range` (if not
        False)

        Args:
            x: The value at which the partially integrated function will be evaluated
            limits: the limits to integrate over. Can contain only some axes
            norm_range: the limits to normalize over. Has to have all axes

        Returns:
            The value of the partially integrated function evaluated at `x`.
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def register_inverse_analytic_integral(cls, func: Callable):
        """Register an inverse analytical integral, the inverse (unnormalized) cdf.

        Args:
            func:
        """
        raise NotImplementedError

    @abstractmethod
    def sample(self, n: int, limits: ztyping.LimitsType = None) -> ztyping.XType:
        """Sample `n` points within `limits` from the model.

        Args:
            n: The number of samples to be generated
            limits: In which region to sample in

        Returns:
            Tensor(n_obs, n_samples)
        """
        raise NotImplementedError


class ZfitFunc(ZfitModel):
    @abstractmethod
    def func(self, x: ztyping.XType, name: str = "value") -> ztyping.XType:
        raise NotImplementedError

    @abstractmethod
    def as_pdf(self):
        raise NotImplementedError


class ZfitPDF(ZfitModel):
    @abstractmethod
    def pdf(
        self, x: ztyping.XType, norm_range: ztyping.LimitsType = None
    ) -> ztyping.XType:
        raise NotImplementedError

    @property
    @abstractmethod
    def is_extended(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def set_norm_range(self):
        raise NotImplementedError

    @abstractmethod
    def create_extended(self, yield_: ztyping.ParamTypeInput) -> "ZfitPDF":
        raise NotImplementedError

    @abstractmethod
    def get_yield(self) -> Union[ZfitParameter, None]:
        raise NotImplementedError

    @abstractmethod
    def normalization(self, limits: ztyping.LimitsType) -> ztyping.NumericalTypeReturn:
        raise NotImplementedError

    @abstractmethod
    def as_func(self, norm_range: ztyping.LimitsType = False):
        raise NotImplementedError


class ZfitFunctorMixin:
    @property
    @abstractmethod
    def models(self) -> Dict[Union[float, int, str], ZfitModel]:
        raise NotImplementedError

    @abstractmethod
    def get_models(self) -> List[ZfitModel]:
        raise NotImplementedError
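# Usage sketch (hypothetical `MyPDF` and `my_gauss_integral`; only the
# classmethod signature above is fixed by the interface): a concrete model
# registers an analytic integral at class level so `integrate` can prefer it
# over numerical integration.
#
#     MyPDF.register_analytic_integral(
#         func=my_gauss_integral,
#         limits=my_limits,
#         priority=50,
#         supports_norm_range=False,
#         supports_multiple_limits=True,
#     )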
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/zfit_interface/old/model.py
model.py
from __future__ import annotations

from abc import abstractmethod
from collections.abc import Mapping
from typing import Union

from zfit_interface.old.loss import ZfitMinimalLoss
from zfit_interface.old.minimizer import ZfitMinimizer
from zfit_interface.old.param import ZfitParameter


class ZfitMinimalResult:
    @property
    @abstractmethod
    def minimizer(self) -> ZfitMinimizer:
        raise NotImplementedError

    @property
    @abstractmethod
    def params(self) -> Mapping[ZfitParameter, Mapping[str, object]]:
        raise NotImplementedError

    @property
    @abstractmethod
    def fmin(self) -> float:
        raise NotImplementedError

    @property
    @abstractmethod
    def valid(self) -> bool:
        raise NotImplementedError

    @property
    @abstractmethod
    def loss(self) -> ZfitMinimalLoss:
        raise NotImplementedError


class ZfitResult:
    @abstractmethod
    def hesse(self, params, method):
        """Calculate for `params` the symmetric error using the Hessian matrix.

        Args:
            params: The parameters to calculate the Hessian symmetric error.
                If None, use all parameters.
            method: the method to calculate the hessian. Can be {'minuit'} or
                a callable.

        Returns:
            Result of the hessian (symmetric) error as dict with each
            parameter holding the error dict {'error': sym_error}.

            So given param_a (from zfit.Parameter(.))
            `error_a = result.hesse(params=param_a)[param_a]['error']`
            error_a is the hessian error.
        """
        raise NotImplementedError

    @abstractmethod
    def errors(self, params, method, sigma):
        """Calculate and set for `params` the asymmetric error using the set error method.

        Args:
            params: The parameters or their names to calculate the errors.
                If `params` is `None`, use all *floating* parameters.
            method: The method to use to calculate the errors. Valid choices
                are {'minuit_minos'} or a Callable.

        Returns:
            A `OrderedDict` containing as keys the parameter names and as
            value a `dict` which contains (next to probably more things) two
            keys 'lower' and 'upper', holding the calculated errors.

            Example: result['par1']['upper'] -> the asymmetric upper error
            of 'par1'
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def edm(self):
        raise NotImplementedError

    @property
    @abstractmethod
    def converged(self):
        raise NotImplementedError
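# Usage sketch, restating the docstring examples above (`result` and
# `param_a` are hypothetical objects from a finished fit):
#
#     sym = result.hesse(params=param_a)[param_a]["error"]
#     asym = result.errors(params=param_a, method="minuit_minos", sigma=1)
#     upper = asym["par1"]["upper"]  # asymmetric upper error of 'par1'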
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/zfit_interface/old/result.py
result.py
================
zfit interface
================

Welcome to the zfit interface definition!

.. panels::
    :header: text-center
    :img-top-cls: pl-2 pr-2 bw-success pt-4
    :img-top: images/zfit-logo_400x168.png

    .. link-button:: project/index
        :type: ref
        :text: The why and what
        :classes: btn-outline-primary btn-block stretched-link

    ---
    :img-top-cls: + pt-4
    :img-top: images/zfit_workflow_v2.png

    .. link-button:: api_definition/index
        :type: ref
        :text: API and workflow
        :classes: btn-outline-primary btn-block stretched-link

    ---
    :img-top-cls: + pt-4
    :img-top: images/P5p_Value.png

    .. link-button:: https://zfit-tutorials.readthedocs.io/en/latest/
        :type: url
        :text: zfit in action
        :classes: btn-outline-primary btn-block stretched-link

    ---
    :img-top-cls: + pt-4
    :img-top: images/logo_graph_tensorflow.png

    .. link-button:: changelog
        :type: ref
        :text: News and changes
        :classes: btn-outline-primary btn-block stretched-link
zfit-interface
/zfit_interface-0.0.3.tar.gz/zfit_interface-0.0.3/docs/index.rst
index.rst