code (stringlengths 22-1.05M) | apis (listlengths 1-3.31k) | extract_api (stringlengths 75-3.25M) |
---|---|---|
# from tensorflow.keras import Model, Input
# from tensorflow.keras.applications import vgg16, resnet50
# from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
# from tensorflow.keras import layers
# import tensorflow as tf
#
# """
# Key features of FCN-8:
# 1. A fully convolutional (fully conv) network with no fully connected (fc) layers, so it accepts inputs of arbitrary size.
# 2. Deconvolution (deconv) layers that enlarge the feature maps, producing fine-grained results.
# 3. A skip structure that combines results from layers at different depths, ensuring both robustness and precision.
# 4. The skip structure fuses the outputs of multiple (3) layers; shallower layers can predict more positional information,
#    because their small receptive fields see small pixels.
#    When upsampling lower-resolution layers, if the upsampled map differs in size from the earlier map (e.g. because of padding), crop it;
#    once cropped to the same size and spatially aligned, fuse the two layers with a concat operation.
#
# Differences and relationship between FCN-8, FCN-16 and FCN-32: the factor used in the final upsampling step.
# 1. Difference: the FCN backbone outputs feature maps such as [b, 16, 16, filters]; upsampling directly by 32x yields [b, 16*32, 16*32, n_classes].
#    Predicting the output by directly upsampling 32x is called FCN-32.
#    FCN-16 and FCN-8 instead fuse feature maps from different stages and obtain the final output by upsampling 16x and 8x, respectively.
# """
#
#
# def fcn8_helper(input_shape, num_classes, backbone):
# assert input_shape[0] % 32 == 0
# assert input_shape[1] % 32 == 0
#
# inputs = Input(input_shape)
# if backbone == 'vgg16':
# base_model = vgg16.VGG16(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=100)
# elif backbone == 'resnet50':
# base_model = resnet50.ResNet50(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=1000)
# assert isinstance(base_model, Model)
# base_model.trainable = False # whether to freeze the feature-extraction backbone
#
# out = Conv2D(
# filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
# kernel_initializer="he_normal", name="score_fr")(out)
#
# # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
# out = Conv2DTranspose(
# filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
#
# fcn8 = Model(inputs=inputs, outputs=out)
# return fcn8
#
#
# def fcn8_model(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16')
#
# # "block4_pool" shape: [B, 16, 16, 512]; skip connection to fuse low-level features:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, filters]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # Upsample by 8x, going directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# def fcn8_model_resnet50(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50')
#
# # "block4_pool" shape: [B, 16, 16, 1024]; skip connection to fuse low-level features:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, 512]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # Upsample by 8x, going directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# if __name__ == '__main__':
# # m = FCN8(15, 320, 320)
# # from keras.utils import plot_model
# #
# # plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# # print(len(m.layers))
# model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1)
# model_1.summary()
# # inputs = tf.keras.Input((256, 256, 3))
# # base_model = resnet50.ResNet50(input_tensor=inputs,
# # include_top=False,
# # weights='imagenet',
# # pooling=None,
# # classes=1000)
# # base_model.summary()
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
"""
FCN-8特点:
1、不含全连接层(fc)的全卷积(fully conv)网络。可适应任意尺寸输入。
2、增大数据尺寸的反卷积(deconv)层。能够输出精细的结果。
3、结合不同深度层结果的跳级(skip)结构。同时确保鲁棒性和精确性。
4、使用 skip 结构融合多层(3层)输出,底层网络可以预测更多的位置信息,因为感受野小可以看到小的 pixels
上采样 lower-resolution layers 时,如果采样后的图因为 padding 等原因和前面的图大小不同,使用 crop,
当裁剪成大小相同的,spatially aligned ,使用 concat 操作融合两个层。
FCN-8、FCN-16、FCN-32的区别与联系: 最后上采样的过程中,放大的倍数,
1、区别: FCN模型会输出三种尺寸的特征图: [b, 16, 16, filters], 这时候直接上采样32倍,可以得到 [b, 16*32, 16*32, n_classes],
如果直接上采样 32 倍预测输出,被称为 FCN-32。
FCN-16 和 FCN-8 则是融合了不同阶段的特征图,最终输出的时候,上采样16倍和8倍得到。
"""
def fcn8_helper(input_shape, num_classes, weight_name='imagenet'):
assert input_shape[0] % 32 == 0
assert input_shape[1] % 32 == 0
inputs = Input(input_shape)
base_model = vgg16.VGG16(input_tensor=inputs,
include_top=False,
weights=weight_name,
pooling=None,
classes=100)
assert isinstance(base_model, Model)
# base_model.trainable = False # whether to freeze the feature-extraction backbone
out = Conv2D(
filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
kernel_initializer="he_normal", name="score_fr")(out)
# [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
out = Conv2DTranspose(
filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
fcn8 = Model(inputs=inputs, outputs=out)
return fcn8
def fcn8_model(input_shape, num_classes):
fcn8 = fcn8_helper(input_shape, num_classes)
# "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
skip_con1 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
Summed = add(inputs=[skip_con1, fcn8.output])
# [B, 32, 32, num_classes]
x = Conv2DTranspose(
num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
# block3_pool: [B, 32, 32, filters]
skip_con2 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
Summed2 = add(inputs=[skip_con2, x])
# Upsample by 8x, going directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
outputs = Conv2DTranspose(
num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
activation='sigmoid', name="upsample")(Summed2)
if num_classes == 1:
outputs = layers.Activation('sigmoid')(outputs)
else:
outputs = layers.Softmax()(outputs)
fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# for layer_ in fcn_model.layers[:]:
# layer_.trainable = True
return fcn_model
if __name__ == '__main__':
# m = FCN8(15, 320, 320)
# from keras.utils import plot_model
#
# plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# print(len(m.layers))
model_1 = fcn8_model(input_shape=(256, 256, 3), num_classes=1)
model_1.summary()
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.add",
"tensorflow.keras.Input",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.Activation"
]
| [((7112, 7130), 'tensorflow.keras.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (7117, 7130), False, 'from tensorflow.keras import Model, Input\n'), ((7149, 7252), 'tensorflow.keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'input_tensor': 'inputs', 'include_top': '(False)', 'weights': 'weight_name', 'pooling': 'None', 'classes': '(100)'}), '(input_tensor=inputs, include_top=False, weights=weight_name,\n pooling=None, classes=100)\n', (7160, 7252), False, 'from tensorflow.keras.applications import vgg16, resnet50\n'), ((8154, 8187), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'out'}), '(inputs=inputs, outputs=out)\n', (8159, 8187), False, 'from tensorflow.keras import Model, Input\n'), ((8574, 8610), 'tensorflow.keras.layers.add', 'add', ([], {'inputs': '[skip_con1, fcn8.output]'}), '(inputs=[skip_con1, fcn8.output])\n', (8577, 8610), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9041, 9067), 'tensorflow.keras.layers.add', 'add', ([], {'inputs': '[skip_con2, x]'}), '(inputs=[skip_con2, x])\n', (9044, 9067), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9462, 9517), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'fcn8.input', 'outputs': 'outputs', 'name': '"""FCN8s"""'}), "(inputs=fcn8.input, outputs=outputs, name='FCN8s')\n", (9467, 9517), False, 'from tensorflow.keras import Model, Input\n'), ((7474, 7561), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1024)', 'kernel_size': '(7)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc6"""'}), "(filters=1024, kernel_size=7, padding='same', activation='relu', name\n ='fc6')\n", (7480, 7561), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7597, 7614), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (7604, 7614), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7631, 7718), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1024)', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc7"""'}), "(filters=1024, kernel_size=1, padding='same', activation='relu', name\n ='fc7')\n", (7637, 7718), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7740, 7757), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (7747, 7757), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((7774, 7910), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'num_classes', 'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_fr"""'}), "(filters=num_classes, kernel_size=(1, 1), padding='same', activation=\n 'relu', kernel_initializer='he_normal', name='score_fr')\n", (7780, 7910), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8003, 8128), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_classes', 'kernel_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': 'None', 'name': '"""score2"""'}), 
"(filters=num_classes, kernel_size=(2, 2), strides=(2, 2),\n padding='valid', activation=None, name='score2')\n", (8018, 8128), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8378, 8506), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'None', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_pool4"""'}), "(num_classes, kernel_size=(1, 1), padding='same', activation=None,\n kernel_initializer='he_normal', name='score_pool4')\n", (8384, 8506), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8654, 8772), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_classes'], {'kernel_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': 'None', 'name': '"""score4"""'}), "(num_classes, kernel_size=(2, 2), strides=(2, 2), padding=\n 'valid', activation=None, name='score4')\n", (8669, 8772), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((8844, 8972), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'padding': '"""same"""', 'activation': 'None', 'kernel_initializer': '"""he_normal"""', 'name': '"""score_pool3"""'}), "(num_classes, kernel_size=(1, 1), padding='same', activation=None,\n kernel_initializer='he_normal', name='score_pool3')\n", (8850, 8972), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9153, 9278), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_classes'], {'kernel_size': '(8, 8)', 'strides': '(8, 8)', 'padding': '"""valid"""', 'activation': '"""sigmoid"""', 'name': '"""upsample"""'}), "(num_classes, kernel_size=(8, 8), strides=(8, 8), padding=\n 'valid', activation='sigmoid', name='upsample')\n", (9168, 9278), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation\n'), ((9349, 9377), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9366, 9377), False, 'from tensorflow.keras import layers\n'), ((9417, 9433), 'tensorflow.keras.layers.Softmax', 'layers.Softmax', ([], {}), '()\n', (9431, 9433), False, 'from tensorflow.keras import layers\n')] |
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.MetricUtils import MetricUtils
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
class UNet_Pytorch_Regression(torch.nn.Module):
def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):
super(UNet_Pytorch_Regression, self).__init__()
self.in_channel = n_input_channels
self.n_classes = n_classes
self.contr_1_1 = conv2d(n_input_channels, n_filt)
self.contr_1_2 = conv2d(n_filt, n_filt)
self.pool_1 = nn.MaxPool2d((2, 2))
self.contr_2_1 = conv2d(n_filt, n_filt * 2)
self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)
self.pool_2 = nn.MaxPool2d((2, 2))
self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)
self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)
self.pool_3 = nn.MaxPool2d((2, 2))
self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)
self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)
self.pool_4 = nn.MaxPool2d((2, 2))
self.dropout = nn.Dropout(p=0.4)
self.encode_1 = conv2d(n_filt * 8, n_filt * 16)
self.encode_2 = conv2d(n_filt * 16, n_filt * 16)
self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)
# self.deconv_1 = nn.Upsample(scale_factor=2) # only upscales width and height # similar results to deconv2d
self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)
self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)
self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)
# self.deconv_2 = nn.Upsample(scale_factor=2)
self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)
self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)
self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)
# self.deconv_3 = nn.Upsample(scale_factor=2)
self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)
self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)
self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)
# self.deconv_4 = nn.Upsample(scale_factor=2)
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)
self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)
self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because it is applied in the loss function (...WithLogits)
def forward(self, inpt):
contr_1_1 = self.contr_1_1(inpt)
contr_1_2 = self.contr_1_2(contr_1_1)
pool_1 = self.pool_1(contr_1_2)
contr_2_1 = self.contr_2_1(pool_1)
contr_2_2 = self.contr_2_2(contr_2_1)
pool_2 = self.pool_2(contr_2_2)
contr_3_1 = self.contr_3_1(pool_2)
contr_3_2 = self.contr_3_2(contr_3_1)
pool_3 = self.pool_3(contr_3_2)
contr_4_1 = self.contr_4_1(pool_3)
contr_4_2 = self.contr_4_2(contr_4_1)
pool_4 = self.pool_4(contr_4_2)
pool_4 = self.dropout(pool_4)
encode_1 = self.encode_1(pool_4)
encode_2 = self.encode_2(encode_1)
deconv_1 = self.deconv_1(encode_2)
concat1 = torch.cat([deconv_1, contr_4_2], 1)
expand_1_1 = self.expand_1_1(concat1)
expand_1_2 = self.expand_1_2(expand_1_1)
deconv_2 = self.deconv_2(expand_1_2)
concat2 = torch.cat([deconv_2, contr_3_2], 1)
expand_2_1 = self.expand_2_1(concat2)
expand_2_2 = self.expand_2_2(expand_2_1)
deconv_3 = self.deconv_3(expand_2_2)
concat3 = torch.cat([deconv_3, contr_2_2], 1)
expand_3_1 = self.expand_3_1(concat3)
expand_3_2 = self.expand_3_2(expand_3_1)
deconv_4 = self.deconv_4(expand_3_2)
concat4 = torch.cat([deconv_4, contr_1_2], 1)
expand_4_1 = self.expand_4_1(concat4)
expand_4_2 = self.expand_4_2(expand_4_1)
conv_5 = self.conv_5(expand_4_2)
return conv_5, None
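# Usage sketch for illustration (the names, shapes, and choice of loss below are assumptions, not
# part of the original file): forward() returns raw logits, so a *WithLogits-style loss would be
# applied outside the model, e.g.:
#
#   model = UNet_Pytorch_Regression(n_input_channels=3, n_classes=7, n_filt=64)
#   x = torch.zeros(2, 3, 144, 144)  # [batch, channels, H, W]; H and W divisible by 16
#   logits, _ = model(x)  # raw scores, no final activation
#   loss = nn.BCEWithLogitsLoss()(logits, torch.zeros_like(logits))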
| [
"torch.nn.Dropout",
"tractseg.libs.PytorchUtils.conv2d",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.cat",
"tractseg.libs.PytorchUtils.deconv2d"
]
| [((1485, 1517), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_input_channels', 'n_filt'], {}), '(n_input_channels, n_filt)\n', (1491, 1517), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1543, 1565), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', 'n_filt'], {}), '(n_filt, n_filt)\n', (1549, 1565), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1588, 1608), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1600, 1608), True, 'import torch.nn as nn\n'), ((1635, 1661), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', '(n_filt * 2)'], {}), '(n_filt, n_filt * 2)\n', (1641, 1661), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1687, 1717), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 2)'], {}), '(n_filt * 2, n_filt * 2)\n', (1693, 1717), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1740, 1760), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1752, 1760), True, 'import torch.nn as nn\n'), ((1787, 1817), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 4)'], {}), '(n_filt * 2, n_filt * 4)\n', (1793, 1817), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1843, 1873), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 4)'], {}), '(n_filt * 4, n_filt * 4)\n', (1849, 1873), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1896, 1916), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (1908, 1916), True, 'import torch.nn as nn\n'), ((1943, 1973), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 8)'], {}), '(n_filt * 4, n_filt * 8)\n', (1949, 1973), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((1999, 2029), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 8)'], {}), '(n_filt * 8, n_filt * 8)\n', (2005, 2029), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2052, 2072), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)'], {}), '((2, 2))\n', (2064, 2072), True, 'import torch.nn as nn\n'), ((2097, 2114), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.4)'}), '(p=0.4)\n', (2107, 2114), True, 'import torch.nn as nn\n'), ((2140, 2171), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 16)'], {}), '(n_filt * 8, n_filt * 16)\n', (2146, 2171), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2196, 2228), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 16)', '(n_filt * 16)'], {}), '(n_filt * 16, n_filt * 16)\n', (2202, 2228), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2253, 2312), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 16)', '(n_filt * 16)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)\n', (2261, 2312), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((2464, 2508), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8 + n_filt * 16)', '(n_filt * 8)'], {}), '(n_filt * 8 + n_filt * 16, n_filt * 8)\n', (2470, 2508), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2535, 2565), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 8)', '(n_filt * 8)'], {}), '(n_filt * 8, n_filt * 8)\n', (2541, 2565), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2590, 2647), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 8)', '(n_filt * 8)'], {'kernel_size': '(2)', 'stride': '(2)'}), 
'(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)\n', (2598, 2647), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((2729, 2782), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4 + n_filt * 8)', '(n_filt * 4)'], {'stride': '(1)'}), '(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)\n', (2735, 2782), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2809, 2849), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 4)', '(n_filt * 4)'], {'stride': '(1)'}), '(n_filt * 4, n_filt * 4, stride=1)\n', (2815, 2849), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((2874, 2931), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 4)', '(n_filt * 4)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)\n', (2882, 2931), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((3013, 3066), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2 + n_filt * 4)', '(n_filt * 2)'], {'stride': '(1)'}), '(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)\n', (3019, 3066), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3093, 3133), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt * 2)', '(n_filt * 2)'], {'stride': '(1)'}), '(n_filt * 2, n_filt * 2, stride=1)\n', (3099, 3133), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3158, 3215), 'tractseg.libs.PytorchUtils.deconv2d', 'deconv2d', (['(n_filt * 2)', '(n_filt * 2)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)\n', (3166, 3215), False, 'from tractseg.libs.PytorchUtils import deconv2d\n'), ((3297, 3342), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['(n_filt + n_filt * 2)', 'n_filt'], {'stride': '(1)'}), '(n_filt + n_filt * 2, n_filt, stride=1)\n', (3303, 3342), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3369, 3401), 'tractseg.libs.PytorchUtils.conv2d', 'conv2d', (['n_filt', 'n_filt'], {'stride': '(1)'}), '(n_filt, n_filt, stride=1)\n', (3375, 3401), False, 'from tractseg.libs.PytorchUtils import conv2d\n'), ((3425, 3500), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_filt', 'n_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (3434, 3500), True, 'import torch.nn as nn\n'), ((4304, 4339), 'torch.cat', 'torch.cat', (['[deconv_1, contr_4_2]', '(1)'], {}), '([deconv_1, contr_4_2], 1)\n', (4313, 4339), False, 'import torch\n'), ((4499, 4534), 'torch.cat', 'torch.cat', (['[deconv_2, contr_3_2]', '(1)'], {}), '([deconv_2, contr_3_2], 1)\n', (4508, 4534), False, 'import torch\n'), ((4694, 4729), 'torch.cat', 'torch.cat', (['[deconv_3, contr_2_2]', '(1)'], {}), '([deconv_3, contr_2_2], 1)\n', (4703, 4729), False, 'import torch\n'), ((4889, 4924), 'torch.cat', 'torch.cat', (['[deconv_4, contr_1_2]', '(1)'], {}), '([deconv_4, contr_1_2], 1)\n', (4898, 4924), False, 'import torch\n')] |
import argparse
import time
from kubernetes.client.rest import ApiException
from polyaxon_client.client import PolyaxonClient
from polyaxon_k8s.manager import K8SManager
from sidecar import settings
from sidecar.monitor import is_pod_running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--app_label',
type=str
)
parser.add_argument(
'--container_id',
type=str
)
parser.add_argument(
'--sleep_interval',
default=2,
type=int
)
parser.add_argument(
'--max_restarts',
default=0,
type=int
)
args = parser.parse_args()
arguments = args.__dict__
container_id = arguments.pop('container_id')
app_label = arguments.pop('app_label')
sleep_interval = arguments.pop('sleep_interval')
max_restarts = arguments.pop('max_restarts')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
client = PolyaxonClient()
client.set_internal_health_check()
retry = 0
is_running = True
status = None
while is_running and retry < 3:
time.sleep(sleep_interval)
try:
is_running, status = is_pod_running(k8s_manager,
settings.POD_ID,
container_id,
max_restarts)
except ApiException:
retry += 1
time.sleep(sleep_interval) # We wait a bit more before try
if status:
client.reconcile(status=status)
| [
"polyaxon_client.client.PolyaxonClient",
"polyaxon_k8s.manager.K8SManager",
"argparse.ArgumentParser",
"sidecar.monitor.is_pod_running",
"time.sleep"
]
| [((285, 310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (308, 310), False, 'import argparse\n'), ((920, 981), 'polyaxon_k8s.manager.K8SManager', 'K8SManager', ([], {'namespace': 'settings.K8S_NAMESPACE', 'in_cluster': '(True)'}), '(namespace=settings.K8S_NAMESPACE, in_cluster=True)\n', (930, 981), False, 'from polyaxon_k8s.manager import K8SManager\n'), ((995, 1011), 'polyaxon_client.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (1009, 1011), False, 'from polyaxon_client.client import PolyaxonClient\n'), ((1149, 1175), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (1159, 1175), False, 'import time\n'), ((1222, 1294), 'sidecar.monitor.is_pod_running', 'is_pod_running', (['k8s_manager', 'settings.POD_ID', 'container_id', 'max_restarts'], {}), '(k8s_manager, settings.POD_ID, container_id, max_restarts)\n', (1236, 1294), False, 'from sidecar.monitor import is_pod_running\n'), ((1503, 1529), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (1513, 1529), False, 'import time\n')] |
#! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
class OdomTopicReader(object):
def __init__(self, topic_name = '/odom'):
self._topic_name = topic_name
self._sub = rospy.Subscriber(self._topic_name, Odometry, self.topic_callback)
self._odomdata = Odometry()
def topic_callback(self, msg):
self._odomdata = msg
rospy.loginfo(self._odomdata)
if __name__ == "__main__":
rospy.init_node('odom_topic_subscriber')
odom_reader_object = OdomTopicReader()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
| [
"nav_msgs.msg.Odometry",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Rate",
"rospy.Subscriber",
"rospy.loginfo"
]
| [((444, 484), 'rospy.init_node', 'rospy.init_node', (['"""odom_topic_subscriber"""'], {}), "('odom_topic_subscriber')\n", (459, 484), False, 'import rospy\n'), ((544, 558), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (554, 558), False, 'import rospy\n'), ((207, 272), 'rospy.Subscriber', 'rospy.Subscriber', (['self._topic_name', 'Odometry', 'self.topic_callback'], {}), '(self._topic_name, Odometry, self.topic_callback)\n', (223, 272), False, 'import rospy\n'), ((298, 308), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (306, 308), False, 'from nav_msgs.msg import Odometry\n'), ((382, 411), 'rospy.loginfo', 'rospy.loginfo', (['self._odomdata'], {}), '(self._odomdata)\n', (395, 411), False, 'import rospy\n'), ((573, 592), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (590, 592), False, 'import rospy\n')] |
"""Tests for quantization"""
import numpy as np
import unittest
import os
import shutil
import yaml
import tensorflow as tf
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
accuracy_criterion:
relative: 0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml2():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
exit_policy:
max_trials: 5
accuracy_criterion:
relative: -0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml2.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_model():
try:
graph = tf.Graph()
graph_def = tf.GraphDef()
with tf.Session() as sess:
x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filter=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.global_variables_initializer())
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
except:
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filters=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.compat.v1.global_variables_initializer())
constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, [
'op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
class TestQuantization(unittest.TestCase):
@classmethod
def setUpClass(self):
self.constant_graph = build_fake_model()
build_fake_yaml()
build_fake_yaml2()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
os.remove('fake_yaml2.yaml')
shutil.rmtree("saved", ignore_errors=True)
def test_ru_random_one_trial(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
def test_ru_random_max_trials(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml2.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
if __name__ == "__main__":
unittest.main()
| [
"yaml.load",
"unittest.main",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"os.remove",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.Graph",
"tensorflow.compat.v1.placeholder",
"numpy.random.random",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.GraphDef",
"neural_compressor.experimental.common.DataLoader",
"tensorflow.compat.v1.graph_util.convert_variables_to_constants",
"tensorflow.nn.conv2d",
"tensorflow.compat.v1.GraphDef",
"yaml.dump",
"tensorflow.import_graph_def",
"tensorflow.global_variables_initializer",
"shutil.rmtree",
"neural_compressor.experimental.Quantization"
]
| [((619, 663), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (628, 663), False, 'import yaml\n'), ((1296, 1340), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (1305, 1340), False, 'import yaml\n'), ((4415, 4430), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4428, 4430), False, 'import unittest\n'), ((735, 750), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (744, 750), False, 'import yaml\n'), ((1413, 1428), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (1422, 1428), False, 'import yaml\n'), ((1500, 1510), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1508, 1510), True, 'import tensorflow as tf\n'), ((1532, 1545), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1543, 1545), True, 'import tensorflow as tf\n'), ((3374, 3401), 'os.remove', 'os.remove', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3383, 3401), False, 'import os\n'), ((3411, 3439), 'os.remove', 'os.remove', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (3420, 3439), False, 'import os\n'), ((3451, 3493), 'shutil.rmtree', 'shutil.rmtree', (['"""saved"""'], {'ignore_errors': '(True)'}), "('saved', ignore_errors=True)\n", (3464, 3493), False, 'import shutil\n'), ((3633, 3663), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3645, 3663), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3776, 3802), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3793, 3802), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3840, 3866), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3857, 3866), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4075, 4106), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (4087, 4106), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4219, 4245), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4236, 4245), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4283, 4309), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4300, 4309), False, 'from neural_compressor.experimental import Quantization, common\n'), ((1560, 1572), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1570, 1572), True, 'import tensorflow as tf\n'), ((1599, 1655), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (1613, 1655), True, 'import tensorflow as tf\n'), ((1745, 1840), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filter': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filter=y, strides=[1, 1, 1, 1], padding='VALID', name\n ='op_to_store')\n", (1757, 1840), True, 'import tensorflow as tf\n'), ((1957, 2045), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def, [\n 'op_to_store'])\n", (2001, 2045), True, 'import 
tensorflow as tf\n'), ((2179, 2218), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2198, 2218), True, 'import tensorflow as tf\n'), ((2249, 2259), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2257, 2259), True, 'import tensorflow as tf\n'), ((2281, 2304), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (2302, 2304), True, 'import tensorflow as tf\n'), ((1685, 1715), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (1701, 1715), True, 'import numpy as np\n'), ((1892, 1925), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1923, 1925), True, 'import tensorflow as tf\n'), ((2319, 2341), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2339, 2341), True, 'import tensorflow as tf\n'), ((2368, 2434), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (2392, 2434), True, 'import tensorflow as tf\n'), ((2534, 2629), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filters': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filters=y, strides=[1, 1, 1, 1], padding='VALID',\n name='op_to_store')\n", (2546, 2629), True, 'import tensorflow as tf\n'), ((2757, 2854), 'tensorflow.compat.v1.graph_util.convert_variables_to_constants', 'tf.compat.v1.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def,\n ['op_to_store'])\n", (2811, 2854), True, 'import tensorflow as tf\n'), ((3057, 3096), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3076, 3096), True, 'import tensorflow as tf\n'), ((2474, 2504), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (2490, 2504), True, 'import numpy as np\n'), ((2682, 2725), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2723, 2725), True, 'import tensorflow as tf\n')] |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EngineClient."""
import datetime
from unittest import mock
import pytest
from google.api_core import exceptions
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf.timestamp_pb2 import Timestamp
from cirq.google.engine.engine_client import EngineClient, EngineException
from cirq.google.engine.client import quantum
from cirq.google.engine.client.quantum_v1alpha1 import enums as qenums
from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes
def setup_mock_(client_constructor):
grpc_client = mock.Mock()
client_constructor.return_value = grpc_client
return grpc_client
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.create_quantum_program.return_value = result
code = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_program('proj', 'prog', code, 'A program',
labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program',
labels=labels), False)
assert client.create_program('proj', 'prog', code,
'A program') == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program'), False)
assert client.create_program('proj', 'prog', code,
labels=labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
labels=labels), False)
assert client.create_program('proj', 'prog', code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code), False)
assert client.create_program('proj', program_id=None,
code=code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj', qtypes.QuantumProgram(code=code), False)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.get_quantum_program.return_value = result
client = EngineClient()
assert client.get_program('proj', 'prog', False) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert client.get_program('proj', 'prog', True) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProgram(name='projects/proj/programs/prog1'),
qtypes.QuantumProgram(name='projects/proj/programs/prog2')
]
grpc_client.list_quantum_programs.return_value = results
client = EngineClient()
assert client.list_programs(project_id='proj') == results
assert grpc_client.list_quantum_programs.call_args[0] == ('projects/proj',)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': '',
}
# yapf: disable
@pytest.mark.parametrize(
'expected_filter, created_after, created_before, labels',
[
('',
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:*',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters(client_constructor, expected_filter,
created_before, created_after, labels):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_programs(project_id='proj',
created_before=created_before,
created_after=created_after,
has_labels=labels)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': expected_filter,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters_invalid_type(client_constructor):
with pytest.raises(ValueError, match=""):
EngineClient().list_programs(project_id='proj',
created_before="Unsupported date/time")
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.set_program_description('proj', 'prog', 'A program') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
description='A program'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_program_description('proj', 'prog', '') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.return_value = qtypes.QuantumProgram(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_program_labels('proj', 'prog', labels) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_program_labels('proj', 'prog', {}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.add_program_labels('proj', 'prog',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.add_program_labels('proj', 'prog',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_program_labels('proj', 'prog', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.remove_program_labels('proj', 'prog', ['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_program_labels('proj', 'prog',
['hello', 'weather']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_program_labels('proj', 'prog',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_program('proj', 'prog')
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert not client.delete_program('proj', 'prog', delete_jobs=True)
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.create_quantum_job.return_value = result
run_context = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10, 'A job',
labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job',
labels=labels), False)
assert client.create_job(
'proj',
'prog',
'job0',
['processor0'],
run_context,
10,
'A job',
) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job'), False)
assert client.create_job('proj',
'prog',
'job0', ['processor0'],
run_context,
10,
labels=labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
labels=labels), False)
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
assert client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
with pytest.raises(ValueError, match='priority must be between 0 and 1000'):
client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=5000)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_job.return_value = result
client = EngineClient()
assert client.get_job('proj', 'prog', 'job0', False) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', False)
assert client.get_job('proj', 'prog', 'job0', True) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.set_job_description('proj', 'prog', 'job0', 'A job') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
description='A job'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_job_description('proj', 'prog', 'job0', '') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_job.return_value = qtypes.QuantumJob(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_job_labels('proj', 'prog', 'job0', labels) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_job_labels('proj', 'prog', 'job0', {}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.add_job_labels('proj', 'prog', 'job0',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_job.call_count == 0
assert client.add_job_labels('proj', 'prog', 'job0',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_job_labels('proj', 'prog', 'job0', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
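

# remove_job_labels is a no-op when none of the keys are present; otherwise
# the surviving labels are written back under a 'labels' field mask.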
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.remove_job_labels('proj', 'prog', 'job0',
['other']) == existing
    assert grpc_client.update_quantum_job.call_count == 0
assert client.remove_job_labels('proj', 'prog', 'job0',
['hello', 'weather']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_job_labels('proj', 'prog', 'job0',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_job('proj', 'prog', 'job0')
assert grpc_client.delete_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.cancel_job('proj', 'prog', 'job0')
assert grpc_client.cancel_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_job_results(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumResult(
parent='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_result.return_value = result
client = EngineClient()
assert client.get_job_results('proj', 'prog', 'job0') == result
assert grpc_client.get_quantum_result.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
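

# list_jobs defaults to an empty filter; omitting program_id lists jobs
# across all programs via the '-' wildcard parent.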
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job1'),
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job2')
]
grpc_client.list_quantum_jobs.return_value = results
client = EngineClient()
assert client.list_jobs(project_id='proj', program_id='prog1') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/prog1',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
assert client.list_jobs(project_id='proj') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/-',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
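

# The cases below exercise filter construction for list_jobs: dates render as
# calendar-day bounds, timezone-aware datetimes as unix seconds, labels as
# 'labels.key:value' terms, and execution states as an OR'd equality clause;
# multiple criteria are joined with AND.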
# yapf: disable
@pytest.mark.parametrize(
'expected_filter, '
'created_after, '
'created_before, '
'labels, '
'execution_states',
[
('',
None,
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None,
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None,
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
None
),
('(execution_status.state = FAILURE OR '
'execution_status.state = CANCELLED)',
None,
None,
None,
[quantum.enums.ExecutionStatus.State.FAILURE,
quantum.enums.ExecutionStatus.State.CANCELLED,]
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:* AND '
'(execution_status.state = SUCCESS)',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
[quantum.enums.ExecutionStatus.State.SUCCESS,],
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs_filters(client_constructor, expected_filter, created_before,
created_after, labels, execution_states):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_jobs(project_id='proj',
program_id='prog',
created_before=created_before,
created_after=created_after,
has_labels=labels,
execution_states=execution_states)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': expected_filter,
}
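

# list_processors queries the project parent with an empty filter by default.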
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_processors(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProcessor(name='projects/proj/processor/processor0'),
qtypes.QuantumProcessor(name='projects/proj/processor/processor1')
]
grpc_client.list_quantum_processors.return_value = results
client = EngineClient()
assert client.list_processors('proj') == results
assert grpc_client.list_quantum_processors.call_args[0] == (
'projects/proj',)
assert grpc_client.list_quantum_processors.call_args[1] == {
'filter_': '',
}


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_processor(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProcessor(name='projects/proj/processors/processor0')
grpc_client.get_quantum_processor.return_value = result
client = EngineClient()
assert client.get_processor('proj', 'processor0') == result
assert grpc_client.get_quantum_processor.call_args[0] == (
'projects/proj/processors/processor0',)


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_calibrations(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumCalibration(
name='projects/proj/processor/processor0/calibrations/123456'),
qtypes.QuantumCalibration(
name='projects/proj/processor/processor1/calibrations/224466')
]
grpc_client.list_quantum_calibrations.return_value = results
client = EngineClient()
assert client.list_calibrations('proj', 'processor0') == results
assert grpc_client.list_quantum_calibrations.call_args[0] == (
'projects/proj/processors/processor0',)


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_calibration('proj', 'processor0', 123456) == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/123456',)
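

# The current calibration is fetched via the reserved 'current' calibration id.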
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
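

# A NotFound response for the current calibration is mapped to a None return.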
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_does_not_exist(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') is None
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
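

# Other API errors, e.g. BadRequest, surface as an EngineException instead.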
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_error(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_current_calibration('proj', 'processor0')
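

# NotFound errors are not retried: the stub is called exactly once and the
# error is re-raised as an EngineException.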
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_doesnt_retry_not_found_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
with pytest.raises(EngineException, match='not found'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count == 1
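

# Retryable 5xx errors are retried until the configured max retry delay is
# exhausted and then reported as a TimeoutError.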
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_retry_5xx_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.ServiceUnavailable(
'internal error')
client = EngineClient(max_retry_delay_seconds=1)
with pytest.raises(TimeoutError,
match='Reached max retry attempts.*internal error'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count > 1
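

# create_reservation converts the supplied datetimes into protobuf Timestamps
# and posts the new reservation under the processor's resource path.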
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
start = datetime.datetime.fromtimestamp(1000000000)
end = datetime.datetime.fromtimestamp(1000003600)
users = ['<EMAIL>']
result = qtypes.QuantumReservation(
name='projects/proj/processors/processor0/reservations/papar-party-44',
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000003600),
whitelisted_users=users,
)
grpc_client.create_quantum_reservation.return_value = result
client = EngineClient()
assert client.create_reservation('proj', 'processor0', start, end,
users) == result
assert grpc_client.create_quantum_reservation.call_count == 1
kwargs = grpc_client.create_quantum_reservation.call_args[1]
# The outgoing argument will not have the resource name
result.name = ''
assert kwargs == {
'parent': 'projects/proj/processors/processor0',
'quantum_reservation': result
}


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.cancel_quantum_reservation.return_value = result
client = EngineClient()
assert (client.cancel_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.cancel_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.delete_quantum_reservation.return_value = result
client = EngineClient()
assert (client.delete_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.delete_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.get_quantum_reservation.return_value = result
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
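

# A missing reservation (NotFound) is returned as None rather than raised.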
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_not_found(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
grpc_client.get_quantum_reservation.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
                                   'papar-party-44') is None)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_exception(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_reservation.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_reservation('proj', 'processor0', 'goog')


@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
results = [
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
),
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1200000000),
end_time=Timestamp(seconds=1200002000),
whitelisted_users=['<EMAIL>'],
),
]
grpc_client.list_quantum_reservations.return_value = results
client = EngineClient()
assert (client.list_reservations('proj', 'processor0') == results)
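

# update_reservation builds an update_mask covering exactly the fields that
# were supplied (start, end and whitelisted users here).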
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000001000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
start=datetime.datetime.fromtimestamp(1000001000),
end=datetime.datetime.fromtimestamp(1000002000),
whitelisted_users=['<EMAIL>'],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name':
name,
'quantum_reservation':
result,
'update_mask':
FieldMask(paths=['start_time', 'end_time', 'whitelisted_users'])
}
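

# Passing an empty whitelist still updates the reservation, with an
# update_mask containing only 'whitelisted_users'.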
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation_remove_all_users(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
whitelisted_users=[],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
whitelisted_users=[],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
'quantum_reservation': result,
'update_mask': FieldMask(paths=['whitelisted_users'])
}
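

# list_time_slots can return both maintenance and reservation slots, each
# carrying its type-specific config.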
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_time_slots(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000020000),
end_time=Timestamp(seconds=1000040000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.MAINTENANCE,
maintenance_config=qtypes.QuantumTimeSlot.MaintenanceConfig(
title='Testing',
description='Testing some new configuration.',
),
),
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000010000),
end_time=Timestamp(seconds=1000020000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.RESERVATION,
reservation_config=qtypes.QuantumTimeSlot.ReservationConfig(
project_id='super_secret_quantum'),
)
]
grpc_client.list_quantum_time_slots.return_value = results
client = EngineClient()
assert (client.list_time_slots('proj', 'processor0') == results)
| [
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumProgram",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumReservation",
"cirq.google.engine.client.quantum_v1alpha1.types.any_pb2.Any",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumResult",
"google.api_core.exceptions.BadRequest",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.MaintenanceConfig",
"datetime.datetime",
"cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector",
"datetime.date",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumJob",
"google.api_core.exceptions.ServiceUnavailable",
"unittest.mock.Mock",
"google.api_core.exceptions.NotFound",
"google.protobuf.timestamp_pb2.Timestamp",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.ReservationConfig",
"pytest.raises",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumCalibration",
"google.protobuf.field_mask_pb2.FieldMask",
"datetime.datetime.fromtimestamp",
"cirq.google.engine.engine_client.EngineClient",
"cirq.google.engine.client.quantum_v1alpha1.types.QuantumProcessor",
"cirq.google.engine.client.quantum_v1alpha1.types.field_mask_pb2.FieldMask",
"unittest.mock.patch.object"
]
cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((27342, 27367), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (27355, 27367), False, 'import datetime\n'), ((27474, 27542), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(0)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)\n', (27491, 27542), False, 'import datetime\n'), ((27697, 27723), 'datetime.date', 'datetime.date', (['(2020)', '(10)', '(1)'], {}), '(2020, 10, 1)\n', (27710, 27723), False, 'import datetime\n'), ((27842, 27867), 'datetime.date', 'datetime.date', (['(2020)', '(9)', '(1)'], {}), '(2020, 9, 1)\n', (27855, 27867), False, 'import datetime\n'), ((27881, 27950), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)', '(0)', '(0)', '(10)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, 0, 0, 10, tzinfo=datetime.timezone.utc)\n', (27898, 27950), False, 'import datetime\n'), ((28687, 28712), 'datetime.date', 'datetime.date', (['(2020)', '(8)', '(1)'], {}), '(2020, 8, 1)\n', (28700, 28712), False, 'import datetime\n'), ((28726, 28785), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(9)', '(1)'], {'tzinfo': 'datetime.timezone.utc'}), '(2020, 9, 1, tzinfo=datetime.timezone.utc)\n', (28743, 28785), False, 'import datetime\n'), ((35133, 35162), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (35142, 35162), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((35181, 35210), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000003600)'}), '(seconds=1000003600)\n', (35190, 35210), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36135, 36164), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (36144, 36164), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36183, 36212), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (36192, 36212), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36925, 36954), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (36934, 36954), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((36973, 37002), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (36982, 37002), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((37712, 37741), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (37721, 37741), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((37760, 37789), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (37769, 37789), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40396, 40425), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000001000)'}), '(seconds=1000001000)\n', (40405, 40425), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40444, 40473), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (40453, 40473), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((41085, 41149), 
'google.protobuf.field_mask_pb2.FieldMask', 'FieldMask', ([], {'paths': "['start_time', 'end_time', 'whitelisted_users']"}), "(paths=['start_time', 'end_time', 'whitelisted_users'])\n", (41094, 41149), False, 'from google.protobuf.field_mask_pb2 import FieldMask\n'), ((41935, 41973), 'google.protobuf.field_mask_pb2.FieldMask', 'FieldMask', ([], {'paths': "['whitelisted_users']"}), "(paths=['whitelisted_users'])\n", (41944, 41973), False, 'from google.protobuf.field_mask_pb2 import FieldMask\n'), ((6803, 6817), 'cirq.google.engine.engine_client.EngineClient', 'EngineClient', ([], {}), '()\n', (6815, 6817), False, 'from cirq.google.engine.engine_client import EngineClient, EngineException\n'), ((39541, 39570), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000000000)'}), '(seconds=1000000000)\n', (39550, 39570), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39593, 39622), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000002000)'}), '(seconds=1000002000)\n', (39602, 39622), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39759, 39788), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1200000000)'}), '(seconds=1200000000)\n', (39768, 39788), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((39811, 39840), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1200002000)'}), '(seconds=1200002000)\n', (39820, 39840), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((40731, 40774), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000001000)'], {}), '(1000001000)\n', (40762, 40774), False, 'import datetime\n'), ((40788, 40831), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(1000002000)'], {}), '(1000002000)\n', (40819, 40831), False, 'import datetime\n'), ((42262, 42291), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000020000)'}), '(seconds=1000020000)\n', (42271, 42291), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42314, 42343), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000040000)'}), '(seconds=1000040000)\n', (42323, 42343), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42447, 42556), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.MaintenanceConfig', 'qtypes.QuantumTimeSlot.MaintenanceConfig', ([], {'title': '"""Testing"""', 'description': '"""Testing some new configuration."""'}), "(title='Testing', description=\n 'Testing some new configuration.')\n", (42487, 42556), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((42706, 42735), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000010000)'}), '(seconds=1000010000)\n', (42715, 42735), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42758, 42787), 'google.protobuf.timestamp_pb2.Timestamp', 'Timestamp', ([], {'seconds': '(1000020000)'}), '(seconds=1000020000)\n', (42767, 42787), False, 'from google.protobuf.timestamp_pb2 import Timestamp\n'), ((42891, 42966), 'cirq.google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot.ReservationConfig', 'qtypes.QuantumTimeSlot.ReservationConfig', ([], {'project_id': '"""super_secret_quantum"""'}), "(project_id='super_secret_quantum')\n", (42931, 42966), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((14627, 14730), 
'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (14668, 14730), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((15336, 15439), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (15377, 15439), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((16106, 16209), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (16147, 16209), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((16739, 16842), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (16780, 16842), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n'), ((17453, 17556), 'cirq.google.engine.client.quantum_v1alpha1.types.SchedulingConfig.ProcessorSelector', 'qtypes.SchedulingConfig.ProcessorSelector', ([], {'processor_names': "['projects/proj/processors/processor0']"}), "(processor_names=[\n 'projects/proj/processors/processor0'])\n", (17494, 17556), True, 'from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v0.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v0.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2
class MediaFileServiceStub(object):
"""Service to manage media files.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetMediaFile = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/GetMediaFile',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.FromString,
)
self.MutateMediaFiles = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/MutateMediaFiles',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.FromString,
)
class MediaFileServiceServicer(object):
"""Service to manage media files.
"""
def GetMediaFile(self, request, context):
"""Returns the requested media file in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateMediaFiles(self, request, context):
"""Creates media files. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
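# Illustrative usage (editor's sketch, not part of the generated file): hosting the
# servicer in a standalone gRPC server. The worker count and port are assumptions.
# from concurrent import futures
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# add_MediaFileServiceServicer_to_server(MediaFileServiceServicer(), server)
# server.add_insecure_port('[::]:50051')
# server.start()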
| [
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler"
]
| [((2991, 3106), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""google.ads.googleads.v0.services.MediaFileService"""', 'rpc_method_handlers'], {}), "(\n 'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)\n", (3027, 3106), False, 'import grpc\n'), ((2180, 2528), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetMediaFile'], {'request_deserializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString', 'response_serializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString'}), '(servicer.GetMediaFile,\n request_deserializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .GetMediaFileRequest.FromString, response_serializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2\n .MediaFile.SerializeToString)\n', (2215, 2528), False, 'import grpc\n'), ((2571, 2950), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.MutateMediaFiles'], {'request_deserializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString', 'response_serializer': 'google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString'}), '(servicer.MutateMediaFiles,\n request_deserializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .MutateMediaFilesRequest.FromString, response_serializer=\n google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2\n .MutateMediaFilesResponse.SerializeToString)\n', (2606, 2950), False, 'import grpc\n')] |
from __future__ import print_function, absolute_import
from sentry import analytics
from sentry.signals import join_request_created, join_request_link_viewed
@join_request_created.connect(weak=False)
def record_join_request_created(member, **kwargs):
analytics.record(
"join_request.created", member_id=member.id, organization_id=member.organization_id
)
@join_request_link_viewed.connect(weak=False)
def record_join_request_link_viewed(organization, **kwargs):
analytics.record("join_request.link_viewed", organization_id=organization.id)
| [
"sentry.signals.join_request_created.connect",
"sentry.signals.join_request_link_viewed.connect",
"sentry.analytics.record"
]
| [((162, 202), 'sentry.signals.join_request_created.connect', 'join_request_created.connect', ([], {'weak': '(False)'}), '(weak=False)\n', (190, 202), False, 'from sentry.signals import join_request_created, join_request_link_viewed\n'), ((377, 421), 'sentry.signals.join_request_link_viewed.connect', 'join_request_link_viewed.connect', ([], {'weak': '(False)'}), '(weak=False)\n', (409, 421), False, 'from sentry.signals import join_request_created, join_request_link_viewed\n'), ((258, 363), 'sentry.analytics.record', 'analytics.record', (['"""join_request.created"""'], {'member_id': 'member.id', 'organization_id': 'member.organization_id'}), "('join_request.created', member_id=member.id,\n organization_id=member.organization_id)\n", (274, 363), False, 'from sentry import analytics\n'), ((487, 564), 'sentry.analytics.record', 'analytics.record', (['"""join_request.link_viewed"""'], {'organization_id': 'organization.id'}), "('join_request.link_viewed', organization_id=organization.id)\n", (503, 564), False, 'from sentry import analytics\n')] |
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from django.conf import settings
from blog.sitemaps import ArticleSitemap
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}}, name='sitemap'),
url(r'^', include('blog.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| [
"django.views.generic.TemplateView.as_view",
"django.conf.urls.include",
"django.conf.urls.url"
]
| [((313, 344), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (316, 344), False, 'from django.conf.urls import url, include\n'), ((456, 547), 'django.conf.urls.url', 'url', (['"""^sitemap\\\\.xml$"""', 'sitemap', "{'sitemaps': {'blog': ArticleSitemap}}"], {'name': '"""sitemap"""'}), "('^sitemap\\\\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}},\n name='sitemap')\n", (459, 547), False, 'from django.conf.urls import url, include\n'), ((374, 449), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""robots.txt"""', 'content_type': '"""text/plain"""'}), "(template_name='robots.txt', content_type='text/plain')\n", (394, 449), False, 'from django.views.generic import TemplateView\n'), ((560, 580), 'django.conf.urls.include', 'include', (['"""blog.urls"""'], {}), "('blog.urls')\n", (567, 580), False, 'from django.conf.urls import url, include\n'), ((679, 706), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (686, 706), False, 'from django.conf.urls import url, include\n')] |
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
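# Illustrative usage (editor's example, not part of the original module):
# dataqc_globalrangetest(np.array([10., 15., 40.]), [0., 20.]) evaluates to
# array([1, 1, 0], dtype=int8), since only 40. falls outside the valid range.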
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlimz).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
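# Illustrative usage (editor's example, not part of the original module): a
# depth-dependent valid range, with limits interpolated from datlimz/datlim.
# dat = np.array([10., 11., 25.]); z = np.array([0., 5., 10.])
# datlim = np.array([[0., 20.], [5., 15.]]); datlimz = np.array([0., 10.])
# dataqc_localrangetest(dat, z, datlim, datlimz) -> array([1, 1, 0], dtype=int8),
# because 25. lies outside the interpolated range [5., 15.] at z = 10.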
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
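# Illustrative usage (editor's example, not part of the original module): a single
# large excursion in an otherwise flat record should be caught by the window test.
# dataqc_spiketest(np.array([1., 1., 1., 50., 1., 1., 1.]), acc=0.1)
# is expected to return 0 at the spike (index 3) and 1 elsewhere with N=5, L=5.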
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# ensure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
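# Illustrative usage (editor's example, not part of the original module): a record
# dominated by a linear drift is flagged, since the order-1 fit removes nearly all
# of its variance.
# t = np.arange(10.)
# dataqc_polytrendtest(0.5 * t, t, ord_n=1, nstd=3) -> array of zeros (trend detected)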
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
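# Illustrative usage (editor's example, not part of the original module): with
# num=3, any run of three or more readings within reso of each other is flagged.
# dataqc_stuckvaluetest(np.array([4.8, 1.4, 3.3, 3.3, 3.3, 1.9]), reso=0.001, num=3)
# should return 0 for the repeated 3.3 readings and 1 for the rest.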
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be monotonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here, they are scalars and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
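# Illustrative usage (editor's example, not part of the original module): with
# ddatdx = [-2, 2], a jump larger than 2 between successive x values starts a run
# of bad flags that persists until the data return to within toldat of the last
# good value; the expected flags below are [1, 1, 0, 0, 1].
# dataqc_gradienttest(np.array([3., 5., 98., 99., 4.]), np.arange(5.),
#                     ddatdx=[-2, 2], mindx=0, startdat=3., toldat=5.)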
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
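# Illustrative usage (editor's example, not part of the original module): combining
# per-sensor QC flags (e.g. conductivity, temperature, pressure) for derived salinity.
# inflags = np.array([[1, 1, 0, 1], [1, 0, 1, 1], [1, 1, 1, 1]], dtype=np.int8)
# dataqc_propagateflags(inflags) -> array([1, 0, 0, 1], dtype=int8)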
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivity record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
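# Illustrative usage (editor's example, not part of the original module): with the
# default cpcor, a 10 dbar pressure revision changes conductivity only slightly.
# dataqc_condcompress(p_orig=1000.0, p_new=1010.0, c_orig=3.5) -> approx. 3.500003 S/m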
| [
"logging.getLogger",
"numpy.sqrt",
"numpy.polyfit",
"ion_functions.utils.islogical",
"numpy.column_stack",
"numpy.asanyarray",
"ion_functions.utils.isnumeric",
"numpy.array",
"numpy.sin",
"ion_functions.qc.qc_extensions.gradientvalues",
"numpy.fix",
"ion_functions.qc.qc_extensions.ntp_to_month",
"numpy.diff",
"numpy.max",
"numpy.polyval",
"numpy.empty",
"ooi.logging.log.warn",
"numpy.min",
"ion_functions.qc.qc_extensions.stuckvalues",
"ion_functions.utils.isvector",
"ion_functions.utils.ismatrix",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"ion_functions.qc.qc_extensions.spikevalues",
"numpy.interp",
"numpy.cos",
"ion_functions.utils.isscalar",
"numpy.std",
"time.gmtime",
"numpy.atleast_1d",
"numpy.arcsin",
"numpy.zeros",
"numpy.all",
"ion_functions.utils.isreal"
]
| [((2766, 2784), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (2779, 2784), True, 'import numpy as np\n'), ((2798, 2819), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (2811, 2819), True, 'import numpy as np\n'), ((9354, 9374), 'numpy.max', 'np.max', (['datlim[:, 1]'], {}), '(datlim[:, 1])\n', (9360, 9374), True, 'import numpy as np\n'), ((9390, 9410), 'numpy.min', 'np.min', (['datlim[:, 0]'], {}), '(datlim[:, 0])\n', (9396, 9410), True, 'import numpy as np\n'), ((12027, 12045), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (12040, 12045), True, 'import numpy as np\n'), ((12664, 12698), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (12677, 12698), True, 'import numpy as np\n'), ((12714, 12741), 'ion_functions.qc.qc_extensions.spikevalues', 'spikevalues', (['dat', 'L', 'N', 'acc'], {}), '(dat, L, N, acc)\n', (12725, 12741), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((15208, 15226), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (15221, 15226), True, 'import numpy as np\n'), ((15235, 15251), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (15248, 15251), True, 'import numpy as np\n'), ((16104, 16129), 'numpy.polyfit', 'np.polyfit', (['t', 'dat', 'ord_n'], {}), '(t, dat, ord_n)\n', (16114, 16129), True, 'import numpy as np\n'), ((16142, 16159), 'numpy.polyval', 'np.polyval', (['pp', 't'], {}), '(pp, t)\n', (16152, 16159), True, 'import numpy as np\n'), ((18121, 18137), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (18134, 18137), True, 'import numpy as np\n'), ((18861, 18872), 'numpy.abs', 'np.abs', (['num'], {}), '(num)\n', (18867, 18872), True, 'import numpy as np\n'), ((18883, 18917), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (18896, 18917), True, 'import numpy as np\n'), ((22914, 22929), 'numpy.isnan', 'np.isnan', (['mindx'], {}), '(mindx)\n', (22922, 22929), True, 'import numpy as np\n'), ((22979, 22997), 'numpy.isnan', 'np.isnan', (['startdat'], {}), '(startdat)\n', (22987, 22997), True, 'import numpy as np\n'), ((23659, 23726), 'ion_functions.qc.qc_extensions.gradientvalues', 'gradientvalues', (['dat', 'x', 'grad_min', 'grad_max', 'mindx', 'startdat', 'toldat'], {}), '(dat, x, grad_min, grad_max, mindx, startdat, toldat)\n', (23673, 23726), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((27028, 27055), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27036, 27055), True, 'import numpy as np\n'), ((27065, 27092), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27073, 27092), True, 'import numpy as np\n'), ((27102, 27129), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27110, 27129), True, 'import numpy as np\n'), ((27139, 27166), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27147, 27166), True, 'import numpy as np\n'), ((27176, 27203), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27184, 27203), True, 'import numpy as np\n'), ((27213, 27240), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27221, 27240), True, 'import numpy as np\n'), ((30314, 30326), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (30321, 30326), True, 
'import numpy as np\n'), ((33207, 33225), 'numpy.all', 'np.all', (['inflags', '(0)'], {}), '(inflags, 0)\n', (33213, 33225), True, 'import numpy as np\n'), ((656, 690), 'logging.getLogger', 'logging.getLogger', (['"""ion-functions"""'], {}), "('ion-functions')\n", (673, 690), False, 'import logging\n'), ((1209, 1243), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (1217, 1243), True, 'import numpy as np\n'), ((3623, 3657), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3631, 3657), True, 'import numpy as np\n'), ((3790, 3824), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3798, 3824), True, 'import numpy as np\n'), ((3902, 3936), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3910, 3936), True, 'import numpy as np\n'), ((4023, 4057), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (4031, 4057), True, 'import numpy as np\n'), ((4520, 4538), 'numpy.column_stack', 'np.column_stack', (['z'], {}), '(z)\n', (4535, 4538), True, 'import numpy as np\n'), ((8558, 8620), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, 0]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)\n', (8567, 8620), True, 'import numpy as np\n'), ((8700, 8762), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, 1]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)\n', (8709, 8762), True, 'import numpy as np\n'), ((9304, 9318), 'numpy.isnan', 'np.isnan', (['lim1'], {}), '(lim1)\n', (9312, 9318), True, 'import numpy as np\n'), ((9323, 9337), 'numpy.isnan', 'np.isnan', (['lim2'], {}), '(lim2)\n', (9331, 9337), True, 'import numpy as np\n'), ((9694, 9728), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (9702, 9728), True, 'import numpy as np\n'), ((12929, 12963), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (12937, 12963), True, 'import numpy as np\n'), ((16622, 16648), 'numpy.empty', 'np.empty', (['x.shape', 'np.int8'], {}), '(x.shape, np.int8)\n', (16630, 16648), True, 'import numpy as np\n'), ((19028, 19060), 'numpy.zeros', 'np.zeros', (['dat.size'], {'dtype': '"""int8"""'}), "(dat.size, dtype='int8')\n", (19036, 19060), True, 'import numpy as np\n'), ((19085, 19112), 'ion_functions.qc.qc_extensions.stuckvalues', 'stuckvalues', (['dat', 'reso', 'num'], {}), '(dat, reso, num)\n', (19096, 19112), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((19402, 19436), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (19410, 19436), True, 'import numpy as np\n'), ((23179, 23200), 'ion_functions.utils.isscalar', 'utils.isscalar', (['mindx'], {}), '(mindx)\n', (23193, 23200), False, 'from ion_functions import utils\n'), ((23280, 23304), 'ion_functions.utils.isscalar', 'utils.isscalar', (['startdat'], {}), '(startdat)\n', (23294, 23304), False, 'from ion_functions import utils\n'), ((23450, 23470), 'numpy.abs', 'np.abs', (['(x[0] - x[-1])'], {}), '(x[0] - x[-1])\n', (23456, 23470), True, 'import numpy as np\n'), ((23494, 23511), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (23502, 23511), True, 'import numpy as np\n'), ((23540, 23577), 
'ooi.logging.log.warn', 'log.warn', (['"""Too few values to inspect"""'], {}), "('Too few values to inspect')\n", (23548, 23577), False, 'from ooi.logging import log\n'), ((26249, 26268), 'ion_functions.utils.isvector', 'utils.isvector', (['lon'], {}), '(lon)\n', (26263, 26268), False, 'from ion_functions import utils\n'), ((26273, 26292), 'ion_functions.utils.isvector', 'utils.isvector', (['lat'], {}), '(lat)\n', (26287, 26292), False, 'from ion_functions import utils\n'), ((26297, 26315), 'ion_functions.utils.isvector', 'utils.isvector', (['dt'], {}), '(dt)\n', (26311, 26315), False, 'from ion_functions import utils\n'), ((27317, 27335), 'time.gmtime', 'time.gmtime', (['dt[i]'], {}), '(dt[i])\n', (27328, 27335), False, 'import time\n'), ((30492, 30505), 'numpy.arcsin', 'np.arcsin', (['sz'], {}), '(sz)\n', (30501, 30505), True, 'import numpy as np\n'), ((30655, 30674), 'numpy.sin', 'np.sin', (['(deg2rad * z)'], {}), '(deg2rad * z)\n', (30661, 30674), True, 'import numpy as np\n'), ((31028, 31064), 'numpy.array', 'np.array', (['[i.shape[0] for i in args]'], {}), '([i.shape[0] for i in args])\n', (31036, 31064), True, 'import numpy as np\n'), ((31212, 31226), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (31220, 31226), True, 'import numpy as np\n'), ((721, 739), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (734, 739), True, 'import numpy as np\n'), ((4267, 4299), 'numpy.asanyarray', 'np.asanyarray', (['v'], {'dtype': 'np.float'}), '(v, dtype=np.float)\n', (4280, 4299), True, 'import numpy as np\n'), ((4316, 4331), 'ion_functions.qc.qc_extensions.ntp_to_month', 'ntp_to_month', (['v'], {}), '(v)\n', (4328, 4331), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((6201, 6220), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (6215, 6220), False, 'from ion_functions import utils\n'), ((6295, 6317), 'ion_functions.utils.ismatrix', 'utils.ismatrix', (['datlim'], {}), '(datlim)\n', (6309, 6317), False, 'from ion_functions import utils\n'), ((9803, 9821), 'numpy.atleast_1d', 'np.atleast_1d', (['acc'], {}), '(acc)\n', (9816, 9821), True, 'import numpy as np\n'), ((9827, 9843), 'numpy.atleast_1d', 'np.atleast_1d', (['N'], {}), '(N)\n', (9840, 9843), True, 'import numpy as np\n'), ((9849, 9865), 'numpy.atleast_1d', 'np.atleast_1d', (['L'], {}), '(L)\n', (9862, 9865), True, 'import numpy as np\n'), ((12282, 12301), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (12296, 12301), False, 'from ion_functions import utils\n'), ((13045, 13065), 'numpy.atleast_1d', 'np.atleast_1d', (['ord_n'], {}), '(ord_n)\n', (13058, 13065), True, 'import numpy as np\n'), ((13071, 13090), 'numpy.atleast_1d', 'np.atleast_1d', (['nstd'], {}), '(nstd)\n', (13084, 13090), True, 'import numpy as np\n'), ((16727, 16746), 'numpy.atleast_1d', 'np.atleast_1d', (['reso'], {}), '(reso)\n', (16740, 16746), True, 'import numpy as np\n'), ((16752, 16770), 'numpy.atleast_1d', 'np.atleast_1d', (['num'], {}), '(num)\n', (16765, 16770), True, 'import numpy as np\n'), ((18278, 18297), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (18292, 18297), False, 'from ion_functions import utils\n'), ((19575, 19595), 'numpy.atleast_1d', 'np.atleast_1d', (['mindx'], {}), '(mindx)\n', (19588, 19595), True, 'import numpy as np\n'), ((19601, 19624), 'numpy.atleast_1d', 'np.atleast_1d', (['startdat'], {}), '(startdat)\n', (19614, 19624), True, 'import numpy as np\n'), ((19630, 19651), 
'numpy.atleast_1d', 'np.atleast_1d', (['toldat'], {}), '(toldat)\n', (19643, 19651), True, 'import numpy as np\n'), ((22810, 22844), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (22823, 22844), True, 'import numpy as np\n'), ((22863, 22895), 'numpy.asanyarray', 'np.asanyarray', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (22876, 22895), True, 'import numpy as np\n'), ((29479, 29502), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29485, 29502), True, 'import numpy as np\n'), ((29710, 29745), 'numpy.sin', 'np.sin', (['(2.0 * ma_sun - 2.0 * ma_ven)'], {}), '(2.0 * ma_sun - 2.0 * ma_ven)\n', (29716, 29745), True, 'import numpy as np\n'), ((29794, 29806), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (29801, 29806), True, 'import numpy as np\n'), ((30119, 30132), 'numpy.sin', 'np.sin', (['(4 * l)'], {}), '(4 * l)\n', (30125, 30132), True, 'import numpy as np\n'), ((30367, 30388), 'numpy.sin', 'np.sin', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30373, 30388), True, 'import numpy as np\n'), ((30389, 30402), 'numpy.sin', 'np.sin', (['decln'], {}), '(decln)\n', (30395, 30402), True, 'import numpy as np\n'), ((30453, 30474), 'numpy.cos', 'np.cos', (['(deg2rad * lha)'], {}), '(deg2rad * lha)\n', (30459, 30474), True, 'import numpy as np\n'), ((32902, 32926), 'ion_functions.utils.islogical', 'utils.islogical', (['inflags'], {}), '(inflags)\n', (32917, 32926), False, 'from ion_functions import utils\n'), ((860, 878), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (873, 878), True, 'import numpy as np\n'), ((1325, 1347), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_min'], {}), '(dat_min)\n', (1338, 1347), True, 'import numpy as np\n'), ((1353, 1375), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_max'], {}), '(dat_max)\n', (1366, 1375), True, 'import numpy as np\n'), ((15670, 15689), 'ion_functions.utils.isvector', 'utils.isvector', (['arg'], {}), '(arg)\n', (15684, 15689), False, 'from ion_functions import utils\n'), ((15848, 15867), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (15862, 15867), False, 'from ion_functions import utils\n'), ((16399, 16417), 'numpy.ones', 'np.ones', (['dat.shape'], {}), '(dat.shape)\n', (16406, 16417), True, 'import numpy as np\n'), ((18646, 18665), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (18660, 18665), False, 'from ion_functions import utils\n'), ((19547, 19568), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19560, 19568), True, 'import numpy as np\n'), ((22477, 22496), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (22491, 22496), False, 'from ion_functions import utils\n'), ((22504, 22521), 'ion_functions.utils.isvector', 'utils.isvector', (['x'], {}), '(x)\n', (22518, 22521), False, 'from ion_functions import utils\n'), ((28148, 28170), 'numpy.fix', 'np.fix', (['(ma_sun / 360.0)'], {}), '(ma_sun / 360.0)\n', (28154, 28170), True, 'import numpy as np\n'), ((28319, 28341), 'numpy.fix', 'np.fix', (['(ml_sun / 360.0)'], {}), '(ml_sun / 360.0)\n', (28325, 28341), True, 'import numpy as np\n'), ((28485, 28507), 'numpy.fix', 'np.fix', (['(ma_jup / 360.0)'], {}), '(ma_jup / 360.0)\n', (28491, 28507), True, 'import numpy as np\n'), ((28931, 28953), 'numpy.fix', 'np.fix', (['(ma_ven / 360.0)'], {}), '(ma_ven / 360.0)\n', (28937, 28953), True, 'import numpy as np\n'), ((29415, 29447), 'numpy.cos', 'np.cos', (['(ma_sun - ml_sun - ma_jup)'], 
{}), '(ma_sun - ml_sun - ma_jup)\n', (29421, 29447), True, 'import numpy as np\n'), ((29675, 29698), 'numpy.sin', 'np.sin', (['(ma_sun - ma_jup)'], {}), '(ma_sun - ma_jup)\n', (29681, 29698), True, 'import numpy as np\n'), ((29929, 29946), 'numpy.fix', 'np.fix', (['(l / 360.0)'], {}), '(l / 360.0)\n', (29935, 29946), True, 'import numpy as np\n'), ((30087, 30100), 'numpy.cos', 'np.cos', (['(3 * l)'], {}), '(3 * l)\n', (30093, 30100), True, 'import numpy as np\n'), ((30405, 30426), 'numpy.cos', 'np.cos', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30411, 30426), True, 'import numpy as np\n'), ((30437, 30450), 'numpy.cos', 'np.cos', (['decln'], {}), '(decln)\n', (30443, 30450), True, 'import numpy as np\n'), ((2862, 2882), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (2877, 2882), False, 'from ion_functions import utils\n'), ((2962, 2979), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (2974, 2979), False, 'from ion_functions import utils\n'), ((3056, 3079), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['datlim'], {}), '(datlim)\n', (3071, 3079), False, 'from ion_functions import utils\n'), ((3162, 3182), 'ion_functions.utils.isreal', 'utils.isreal', (['datlim'], {}), '(datlim)\n', (3174, 3182), False, 'from ion_functions import utils\n'), ((12088, 12108), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (12103, 12108), False, 'from ion_functions import utils\n'), ((12188, 12205), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (12200, 12205), False, 'from ion_functions import utils\n'), ((16236, 16247), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (16242, 16247), True, 'import numpy as np\n'), ((18180, 18200), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (18195, 18200), False, 'from ion_functions import utils\n'), ((18370, 18387), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (18382, 18387), False, 'from ion_functions import utils\n'), ((19520, 19541), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19533, 19541), True, 'import numpy as np\n'), ((22712, 22722), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (22719, 22722), True, 'import numpy as np\n'), ((27867, 27891), 'numpy.fix', 'np.fix', (['(275.0 * mn / 9.0)'], {}), '(275.0 * mn / 9.0)\n', (27873, 27891), True, 'import numpy as np\n'), ((28720, 28743), 'numpy.fix', 'np.fix', (['(an_moon / 360.0)'], {}), '(an_moon / 360.0)\n', (28726, 28743), True, 'import numpy as np\n'), ((29365, 29392), 'numpy.sin', 'np.sin', (['(2 * ma_sun - ml_sun)'], {}), '(2 * ma_sun - ml_sun)\n', (29371, 29392), True, 'import numpy as np\n'), ((29636, 29650), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29642, 29650), True, 'import numpy as np\n'), ((30066, 30079), 'numpy.sin', 'np.sin', (['(3 * l)'], {}), '(3 * l)\n', (30072, 30079), True, 'import numpy as np\n'), ((3566, 3587), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3579, 3587), True, 'import numpy as np\n'), ((3733, 3754), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3746, 3754), True, 'import numpy as np\n'), ((6567, 6587), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (6582, 6587), False, 'from ion_functions import utils\n'), ((6685, 6702), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (6697, 6702), False, 'from ion_functions import utils\n'), ((12444, 12464), 
'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (12459, 12464), False, 'from ion_functions import utils\n'), ((12562, 12579), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (12574, 12579), False, 'from ion_functions import utils\n'), ((15384, 15404), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (15399, 15404), False, 'from ion_functions import utils\n'), ((15502, 15519), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (15514, 15519), False, 'from ion_functions import utils\n'), ((16206, 16225), 'numpy.std', 'np.std', (['(dat - datpp)'], {}), '(dat - datpp)\n', (16212, 16225), True, 'import numpy as np\n'), ((18528, 18548), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (18543, 18548), False, 'from ion_functions import utils\n'), ((18758, 18775), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (18770, 18775), False, 'from ion_functions import utils\n'), ((29317, 29340), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29323, 29340), True, 'import numpy as np\n'), ((29586, 29604), 'numpy.cos', 'np.cos', (['(2 * ma_sun)'], {}), '(2 * ma_sun)\n', (29592, 29604), True, 'import numpy as np\n'), ((30046, 30059), 'numpy.cos', 'np.cos', (['(2 * l)'], {}), '(2 * l)\n', (30052, 30059), True, 'import numpy as np\n'), ((29259, 29283), 'numpy.sin', 'np.sin', (['(an_moon - ml_sun)'], {}), '(an_moon - ml_sun)\n', (29265, 29283), True, 'import numpy as np\n'), ((29558, 29572), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29564, 29572), True, 'import numpy as np\n'), ((30015, 30028), 'numpy.sin', 'np.sin', (['(2 * l)'], {}), '(2 * l)\n', (30021, 30028), True, 'import numpy as np\n'), ((29231, 29245), 'numpy.cos', 'np.cos', (['ml_sun'], {}), '(ml_sun)\n', (29237, 29245), True, 'import numpy as np\n'), ((29975, 29984), 'numpy.sin', 'np.sin', (['l'], {}), '(l)\n', (29981, 29984), True, 'import numpy as np\n'), ((29995, 30004), 'numpy.cos', 'np.cos', (['l'], {}), '(l)\n', (30001, 30004), True, 'import numpy as np\n'), ((29181, 29208), 'numpy.sin', 'np.sin', (['(2 * ma_sun + ml_sun)'], {}), '(2 * ma_sun + ml_sun)\n', (29187, 29208), True, 'import numpy as np\n'), ((27828, 27851), 'numpy.fix', 'np.fix', (['((mn + 9) / 12.0)'], {}), '((mn + 9) / 12.0)\n', (27834, 27851), True, 'import numpy as np\n'), ((29153, 29167), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29159, 29167), True, 'import numpy as np\n'), ((29096, 29119), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29102, 29119), True, 'import numpy as np\n'), ((29020, 29034), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29026, 29034), True, 'import numpy as np\n'), ((29048, 29071), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29054, 29071), True, 'import numpy as np\n')] |
"""
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def read_file(filename):
"""
Read output from query_events command.
The file is expected to be formatted such that there's a header line, a separator, then the
data. The column set must contain "elapsed" and "rowcount" for later processing.
    Arthur also prints a summary after the table, like "(100 rows)", which is skipped if present.
"""
column_spacing_re = re.compile(r"\s+\|\s+")
row_count_re = re.compile(r"\(\d+\s*rows\)")
print(f"Reading events from {filename}...")
with open(filename) as f:
for i, line in enumerate(f.readlines()):
if i == 1 or row_count_re.match(line):
# Found the separator line or the final row tally.
continue
yield column_spacing_re.sub("|", line).strip()
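# Illustrative sample of the expected "query_events" output (an assumption for this
# sketch -- the real column set may differ, but it must include "step", "target",
# "elapsed" and "rowcount" for extract_values() below to work):
#
#   step | target        | elapsed | rowcount
#   -----+---------------+---------+----------
#   load | www.orders    | 123.4   | 100000
#   load | www.customers | ---     | ---
#   (2 rows)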
def parse_file(filename):
"""Parse the input as '|'-delimited columns."""
lines = read_file(filename)
reader = csv.reader(lines, delimiter="|")
row_class = namedtuple("CsvRow", next(reader), rename=True)
for row in reader:
yield row_class._make(row)
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
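# Hand-checked examples of delta() for reference:
#   delta(100.0, 110.0) ->  10.0   (10% increase)
#   delta(200.0, 150.0) -> -25.0   (25% decrease)
#   delta(None, 5.0)    ->  None   (undefined when either value is missing)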
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
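# Hand-checked examples of the significance rules above:
#   show_delta(5.0, 8.0, "elapsed")          -> False  (both finished in under 10s)
#   show_delta(25.0, 50.0, "elapsed")        -> True   (quick loads more than 20s apart)
#   show_delta(1000000, 999000, "rowcount")  -> True   (row counts should not shrink)
#   show_delta(500, 505, "rowcount")         -> False  (small table, within +/-10 rows)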
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| [
"tabulate.tabulate",
"math.isclose",
"re.compile",
"collections.defaultdict",
"sys.exit",
"csv.reader"
]
| [((1145, 1170), 're.compile', 're.compile', (['"""\\\\s+\\\\|\\\\s+"""'], {}), "('\\\\s+\\\\|\\\\s+')\n", (1155, 1170), False, 'import re\n'), ((1188, 1220), 're.compile', 're.compile', (['"""\\\\(\\\\d+\\\\s*rows\\\\)"""'], {}), "('\\\\(\\\\d+\\\\s*rows\\\\)')\n", (1198, 1220), False, 'import re\n'), ((1673, 1705), 'csv.reader', 'csv.reader', (['lines'], {'delimiter': '"""|"""'}), "(lines, delimiter='|')\n", (1683, 1705), False, 'import csv\n'), ((2023, 2049), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2034, 2049), False, 'from collections import defaultdict, namedtuple\n'), ((2064, 2090), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (2075, 2090), False, 'from collections import defaultdict, namedtuple\n'), ((4183, 4234), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.1)'}), '(previous_value, current_value, rel_tol=0.1)\n', (4190, 4234), False, 'from math import isclose\n'), ((5085, 5199), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "('target', 'step', 'prev. ' + column, 'cur. ' + column, 'delta %')", 'tablefmt': '"""presto"""'}), "(table, headers=('target', 'step', 'prev. ' + column, 'cur. ' +\n column, 'delta %'), tablefmt='presto')\n", (5093, 5199), False, 'from tabulate import tabulate\n'), ((5357, 5368), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5365, 5368), False, 'import sys\n'), ((5543, 5554), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5551, 5554), False, 'import sys\n'), ((3457, 3509), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'abs_tol': '(20.0)'}), '(previous_value, current_value, abs_tol=20.0)\n', (3464, 3509), False, 'from math import isclose\n'), ((3591, 3642), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.5)'}), '(previous_value, current_value, rel_tol=0.5)\n', (3598, 3642), False, 'from math import isclose\n'), ((3726, 3777), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'rel_tol': '(0.2)'}), '(previous_value, current_value, rel_tol=0.2)\n', (3733, 3777), False, 'from math import isclose\n'), ((4116, 4166), 'math.isclose', 'isclose', (['previous_value', 'current_value'], {'abs_tol': '(10)'}), '(previous_value, current_value, abs_tol=10)\n', (4123, 4166), False, 'from math import isclose\n')] |
# Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
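# The config is a JSON file (see DEFAULT_CONFIG_FILENAME above); main() below reads the
# keys "drift_scenario", "bins", "dataset", "dataset_class", "output" and "timestamps",
# plus the optional "bin_value" (default "results") and "bin_shuffle" (default True).
# A purely hypothetical skeleton -- real values depend on the experiment:
#
# {
#     "dataset": "path/to/base_dataset.json",
#     "dataset_class": "some.module.SomeDatasetClass",
#     "drift_scenario": { ... },
#     "bins": [ ... ],
#     "timestamps": { ... },
#     "output": "path/to/drifted_dataset.json"
# }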
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
def main():
logging.setup_logging(LOG_FILE_NAME)
# Allow selecting configs for experiments, and load it.
args = arguments.get_parsed_arguments()
config_file = Config.get_config_file(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)
config = Config()
config.load(config_file)
# Load scenario data.
drift_module, params = drift_generator.load_drift_config(config.get("drift_scenario"))
if args.test:
drift_generator.test_drift(config, drift_module, params, config.get("bins"))
else:
# Sort dataset into bins.
base_dataset = load_dataset(config.get("dataset"), config.get("dataset_class"))
bin_value = config.get("bin_value") if config.contains("bin_value") else "results"
bin_shuffle = config.get("bin_shuffle") if config.contains("bin_shuffle") else True
bins = drift_generator.load_bins(base_dataset, config.get("bins"), bin_value, bin_shuffle)
# Apply drift.
drifted_dataset = drift_generator.apply_drift(bins, drift_module, params)
drift_generator.add_timestamps(drifted_dataset, config.get("timestamps"))
# Save it to regular file, and timestamped file.
drifted_dataset.save_to_file(config.get("output"))
print("Copying output file to timestamped backup.")
shutil.copyfile(config.get("output"), drift_generator.get_drift_stamped_name(config.get("output")))
if __name__ == '__main__':
main()
| [
"utils.logging.setup_logging",
"utils.arguments.get_parsed_arguments",
"datasets.dataset.load_dataset_class",
"utils.config.Config.get_config_file",
"utils.config.Config",
"drift.drift_generator.apply_drift"
]
| [((2523, 2569), 'datasets.dataset.load_dataset_class', 'dataset.load_dataset_class', (['dataset_class_name'], {}), '(dataset_class_name)\n', (2549, 2569), False, 'from datasets import dataset\n'), ((2697, 2733), 'utils.logging.setup_logging', 'logging.setup_logging', (['LOG_FILE_NAME'], {}), '(LOG_FILE_NAME)\n', (2718, 2733), False, 'from utils import logging\n'), ((2806, 2838), 'utils.arguments.get_parsed_arguments', 'arguments.get_parsed_arguments', ([], {}), '()\n', (2836, 2838), False, 'from utils import arguments\n'), ((2857, 2935), 'utils.config.Config.get_config_file', 'Config.get_config_file', (['args', 'DRIFT_EXP_CONFIG_FOLDER', 'DEFAULT_CONFIG_FILENAME'], {}), '(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)\n', (2879, 2935), False, 'from utils.config import Config\n'), ((2949, 2957), 'utils.config.Config', 'Config', ([], {}), '()\n', (2955, 2957), False, 'from utils.config import Config\n'), ((3673, 3728), 'drift.drift_generator.apply_drift', 'drift_generator.apply_drift', (['bins', 'drift_module', 'params'], {}), '(bins, drift_module, params)\n', (3700, 3728), False, 'from drift import drift_generator\n')] |
import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
@pytest.mark.xfail
def test_90():
r"""
P90 (**) Eight queens problem
This is a classical problem in computer science. The objective is to
place eight queens on a chessboard so that no two queens are attacking
each other; i.e., no two queens are in the same row, the same column,
or on the same diagonal. We generalize this original problem by
allowing for an arbitrary dimension N of the chessboard.
We represent the positions of the queens as a list of numbers 1..N.
Example: [4,2,7,3,6,8,5,1] means that the queen in the first column
is in row 4, the queen in the second column is in row 2, etc.
By using the permutations of the numbers 1..N we guarantee that
no two queens are in the same row. The only test that remains
to be made is the diagonal test. A queen placed at column X and
row Y occupies two diagonals: one of them, with number C = X-Y, goes
from bottom-left to top-right, the other one, numbered D = X+Y, goes
from top-left to bottom-right. In the test predicate we keep track
of the already occupied diagonals in Cs and Ds.
% The first version is a simple generate-and-test solution.
% queens_1(N,Qs) :- Qs is a solution of the N-queens problem
queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs).
% range(A,B,L) :- L is the list of numbers A..B
range(A,A,[A]).
range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L).
% permu(Xs,Zs) :- the list Zs is a permutation of the list Xs
permu([],[]).
permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys).
del(X,[X|Xs],Xs).
del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs).
% test(Qs) :- the list Qs represents a non-attacking queens solution
test(Qs) :- test(Qs,1,[],[]).
% test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N,
% are not in conflict with the diagonals Cs and Ds
test([],_,_,_).
test([Y|Ys],X,Cs,Ds) :-
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X + 1,
test(Ys,X1,[C|Cs],[D|Ds]).
%--------------------------------------------------------------
% Now, in version 2, the tester is pushed completely inside the
% generator permu.
queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]).
permu_test([],[],_,_,_).
permu_test(Qs,[Y|Ys],X,Cs,Ds) :-
del(Y,Qs,Rs),
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X+1,
permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]).
"""
N, Qs, N, Rs, Qs, A, B, L, A1, Y, Ys, X, Xs, Zs = Variable.factory(
"N", "Qs", "N", "Rs", "Qs", "A", "B", "L", "A1", "Y", "Ys", "X", "Xs", "Zs"
)
_W1, _W2, _W3 = Variable.factory("_W1", "_W2", "_W3")
Cs, Ds, D, X1, C, Cs = Variable.factory("Cs", "Ds", "D", "X1", "C", "Cs")
db = [
Rule(
dict(queens_1=N, a=Qs),
dict(range=1, a=N, b=Rs),
dict(permu=Rs, a=Qs),
dict(test=Qs),
),
dict(range=A, a=A, b=[A]),
Rule(
dict(range=A, a=B, b=[A, *L]),
Assert(lambda A, B: A < B),
Assign(A1, lambda A: A + 1),
dict(range=A1, a=B, b=L),
),
dict(permu=[], a=[]),
Rule(
dict(permu=Qs, a=[Y, *Ys]), dict(delete=Y, a=Qs, b=Rs), dict(permu=Rs, a=Ys)
),
dict(delete=X, a=[X, *Xs], b=Xs),
Rule(dict(delete=X, a=[Y, *Ys], b=[Y, *Zs]), dict(delete=X, a=Ys, b=Zs)),
Rule(dict(test=Qs), dict(test=Qs, a=1, b=[], c=[])),
dict(test=[], a=_W1, b=_W2, c=_W3),
Rule(
dict(test=[Y, *Ys], a=X, b=Cs, c=Ds),
Assign(C, lambda X, Y: X - Y),
Assert(lambda C, Cs: C not in Cs),
Assign(D, lambda X, Y: X + Y),
Assert(lambda D, Ds: D not in Ds),
Assign(X1, lambda X: X + 1),
dict(test=Ys, a=X1, b=[C, *Cs], c=[D, *Ds]),
),
]
Q = Variable("Q")
query = dict(queens_1=8, a=Q)
assert list(search(db, query)) == []
| [
"inference_logic.Variable",
"inference_logic.search",
"inference_logic.Variable.factory",
"inference_logic.data_structures.Assert",
"inference_logic.data_structures.Assign"
]
| [((2700, 2797), 'inference_logic.Variable.factory', 'Variable.factory', (['"""N"""', '"""Qs"""', '"""N"""', '"""Rs"""', '"""Qs"""', '"""A"""', '"""B"""', '"""L"""', '"""A1"""', '"""Y"""', '"""Ys"""', '"""X"""', '"""Xs"""', '"""Zs"""'], {}), "('N', 'Qs', 'N', 'Rs', 'Qs', 'A', 'B', 'L', 'A1', 'Y', 'Ys',\n 'X', 'Xs', 'Zs')\n", (2716, 2797), False, 'from inference_logic import Rule, Variable, search\n'), ((2828, 2865), 'inference_logic.Variable.factory', 'Variable.factory', (['"""_W1"""', '"""_W2"""', '"""_W3"""'], {}), "('_W1', '_W2', '_W3')\n", (2844, 2865), False, 'from inference_logic import Rule, Variable, search\n'), ((2893, 2943), 'inference_logic.Variable.factory', 'Variable.factory', (['"""Cs"""', '"""Ds"""', '"""D"""', '"""X1"""', '"""C"""', '"""Cs"""'], {}), "('Cs', 'Ds', 'D', 'X1', 'C', 'Cs')\n", (2909, 2943), False, 'from inference_logic import Rule, Variable, search\n'), ((4078, 4091), 'inference_logic.Variable', 'Variable', (['"""Q"""'], {}), "('Q')\n", (4086, 4091), False, 'from inference_logic import Rule, Variable, search\n'), ((3220, 3246), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda A, B: A < B)'], {}), '(lambda A, B: A < B)\n', (3226, 3246), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3260, 3287), 'inference_logic.data_structures.Assign', 'Assign', (['A1', '(lambda A: A + 1)'], {}), '(A1, lambda A: A + 1)\n', (3266, 3287), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3787, 3816), 'inference_logic.data_structures.Assign', 'Assign', (['C', '(lambda X, Y: X - Y)'], {}), '(C, lambda X, Y: X - Y)\n', (3793, 3816), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3830, 3863), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda C, Cs: C not in Cs)'], {}), '(lambda C, Cs: C not in Cs)\n', (3836, 3863), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3877, 3906), 'inference_logic.data_structures.Assign', 'Assign', (['D', '(lambda X, Y: X + Y)'], {}), '(D, lambda X, Y: X + Y)\n', (3883, 3906), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3920, 3953), 'inference_logic.data_structures.Assert', 'Assert', (['(lambda D, Ds: D not in Ds)'], {}), '(lambda D, Ds: D not in Ds)\n', (3926, 3953), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((3967, 3994), 'inference_logic.data_structures.Assign', 'Assign', (['X1', '(lambda X: X + 1)'], {}), '(X1, lambda X: X + 1)\n', (3973, 3994), False, 'from inference_logic.data_structures import Assert, Assign\n'), ((4142, 4159), 'inference_logic.search', 'search', (['db', 'query'], {}), '(db, query)\n', (4148, 4159), False, 'from inference_logic import Rule, Variable, search\n')] |
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class defining the different control status levels.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
class DiagnosticsWidget(QPushButton):
DIAGNOSTICS_TOPLEVEL_TOPIC_NAME = rospy.get_param('diagnostics_toplevel_topic_name','/diagnostics_toplevel_state')
state = "status_stale"
msg = "No diagnostic messages received"
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
# Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message
self.connect(self, SIGNAL("stateChanged"), self.update_state)
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
self._diagnostics_toplevel_state_sub = rospy.Subscriber(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME , DiagnosticStatus, self.toplevel_state_callback)
# Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
def update_state(self, state, msg):
self.setIcon(R.getIconById(state))
self.setIconSize(QSize(40,40))
self.setToolTip(msg)
def toplevel_state_callback(self, msg):
self.state = msg.level
if msg.level == 0:
self.state= "status_ok"
self.msg = "OK"
if msg.level == 1 :
self.state= "status_warning"
self.msg = "WARNING"
if msg.level == 2 :
self.state= "status_error"
self.msg = "ERROR"
if msg.level == 3 :
self.state= "status_stale"
self.msg = "STALE"
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
def _trigger_button(self, checked):
popup = DiagnosticsPopup(self, self._context)
popup.show_()
class DiagnosticsPopup(QAgiPopup):
def __init__(self, parent, context):
"""! The constructor."""
QAgiPopup.__init__(self, parent)
self._context = context
self._parent = parent
self.setRelativePosition(QAgiPopup.TopRight, QAgiPopup.BottomRight)
loadUi(R.layouts.diagnostics_popup, self)
self._inspectors = {}
self._current_msg = None
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self.adjustSize()
# Diagnostics subscriber
DIAGNOSTICS_TOPIC_NAME = rospy.get_param('diagnostics_topic_name','/diagnostics_agg')
self.connect(self,SIGNAL("UpdateDiagnostics"), self.update_diag)
self._diagnostics_agg_sub = rospy.Subscriber(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)
def update_diag(self):
#update the tree
self._tree.prune()
self.tree_all_devices.resizeColumnToContents(0)
self.adjustSize()
def message_cb(self,msg):
""" DiagnosticArray message callback """
for status in msg.status:
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
self.emit(SIGNAL('UpdateDiagnostics'))
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
    main.setCentralWidget(DiagnosticsWidget(Context(main)))
main.show()
app.exec_()
#End of file
| [
"rqt_robot_monitor.util_robot_monitor.get_resource_name",
"airbus_cobot_gui.context.Context",
"python_qt_binding.loadUi",
"rospy.get_param",
"airbus_pyqt_extend.QtAgiGui.QAgiPopup.__init__",
"airbus_cobot_gui.res.R.getIconById",
"rospy.Subscriber"
]
| [((1353, 1438), 'rospy.get_param', 'rospy.get_param', (['"""diagnostics_toplevel_topic_name"""', '"""/diagnostics_toplevel_state"""'], {}), "('diagnostics_toplevel_topic_name',\n '/diagnostics_toplevel_state')\n", (1368, 1438), False, 'import rospy\n'), ((1939, 2045), 'rospy.Subscriber', 'rospy.Subscriber', (['self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME', 'DiagnosticStatus', 'self.toplevel_state_callback'], {}), '(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME, DiagnosticStatus,\n self.toplevel_state_callback)\n', (1955, 2045), False, 'import rospy\n'), ((3137, 3169), 'airbus_pyqt_extend.QtAgiGui.QAgiPopup.__init__', 'QAgiPopup.__init__', (['self', 'parent'], {}), '(self, parent)\n', (3155, 3169), False, 'from airbus_pyqt_extend.QtAgiGui import QAgiPopup\n'), ((3317, 3358), 'python_qt_binding.loadUi', 'loadUi', (['R.layouts.diagnostics_popup', 'self'], {}), '(R.layouts.diagnostics_popup, self)\n', (3323, 3358), False, 'from python_qt_binding import loadUi\n'), ((3771, 3832), 'rospy.get_param', 'rospy.get_param', (['"""diagnostics_topic_name"""', '"""/diagnostics_agg"""'], {}), "('diagnostics_topic_name', '/diagnostics_agg')\n", (3786, 3832), False, 'import rospy\n'), ((3941, 4015), 'rospy.Subscriber', 'rospy.Subscriber', (['DIAGNOSTICS_TOPIC_NAME', 'DiagnosticArray', 'self.message_cb'], {}), '(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)\n', (3957, 4015), False, 'import rospy\n'), ((2298, 2318), 'airbus_cobot_gui.res.R.getIconById', 'R.getIconById', (['state'], {}), '(state)\n', (2311, 2318), False, 'from airbus_cobot_gui.res import R\n'), ((4790, 4803), 'airbus_cobot_gui.context.Context', 'Context', (['main'], {}), '(main)\n', (4797, 4803), False, 'from airbus_cobot_gui.context import Context\n'), ((4532, 4567), 'rqt_robot_monitor.util_robot_monitor.get_resource_name', 'util.get_resource_name', (['status.name'], {}), '(status.name)\n', (4554, 4567), True, 'import rqt_robot_monitor.util_robot_monitor as util\n')] |
"""Handles data storage for Users, rides and requests
"""
# pylint: disable=E1101
import datetime
from flask import make_response, jsonify, current_app
from werkzeug.security import generate_password_hash
import psycopg2
import config
from databasesetup import db
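# Rough usage sketch of the classes below (illustrative only -- ids, names and
# timestamps are made up, and every call writes straight to the configured database):
#
#   User("alice", "alice@mail.com", "secret", admin=False)            # INSERT a user
#   Ride.create_ride("CBD to Airport", "1", "2018-06-01 08:00", "KAA 123X", "4")
#   Request.request_ride(ride_id=1, user_id=2)      # rider asks to join ride 1
#   Request.accept_request(request_id=1)            # driver approves the request
#   Ride.start_ride(ride_id=1, driver_id=1)         # marks the ride as "given"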
class User():
"""Contains user columns and methods to add, update and delete a user"""
def __init__(self, username, email, password, admin):
self.username = username
self.email = email
self.password = generate_password_hash(password, method='sha256')
if admin == True:
self.admin = '1'
else:
self.admin = '0'
new_user = "INSERT INTO users (username, email, password, admin) VALUES " \
"('" + self.username + "', '" + self.email + "', '" + self.password + "', '" + self.admin + "')"
db_cursor = db.con()
db_cursor.execute(new_user)
db.commit()
@staticmethod
def update_user(user_id, username, email, password, admin):
"""Updates user information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE users SET username=%s, email=%s, password=%s, admin=%s WHERE user_id=%s",
(username, email, password, admin, user_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def delete_user(user_id):
"""Deletes a user"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM users WHERE user_id=%s", (user_id,))
db.commit()
return make_response(jsonify({"message" : "user has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_user(user_id):
"""Gets a particular user"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users WHERE user_id=%s", (user_id,))
user = db_cursor.fetchall()
if user != []:
user=user[0]
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
return make_response(jsonify({"profile" : info}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_all_users():
"""Gets all users"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users")
users = db_cursor.fetchall()
all_users = []
for user in users:
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
all_users.append(info)
return make_response(jsonify({"All users" : all_users}), 200)
class Ride():
"""Contains ride columns and methods to add, update and delete a ride"""
def __init__(self, ride, driver_id, departuretime, numberplate, maximum, status):
self.ride = ride
self.driver_id = driver_id
self.departuretime = departuretime
self.numberplate = numberplate
self.maximum = maximum
self.status = status
new_ride = "INSERT INTO rides (ride, driver_id, departuretime, numberplate, maximum, status) VALUES " \
"('" + self.ride + "', '" + self.driver_id + "', '" + self.departuretime + "', '" + self.numberplate + "','" + self.maximum + "','" + self.status + "' )"
db_cursor = db.con()
db_cursor.execute(new_ride)
db.commit()
@classmethod
def create_ride(cls, ride, driver_id, departuretime, numberplate, maximum, status="pending"):
"""Creates a new ride"""
cls(ride, driver_id, departuretime, numberplate, maximum, status)
return make_response(jsonify({"message" : "ride has been successfully created"}), 201)
@staticmethod
def update_ride(ride_id, ride, driver_id, departuretime, numberplate,
maximum):
"""Updates ride information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE rides SET ride=%s, driver_id=%s, departuretime=%s, numberplate=%s, maximum=%s WHERE ride_id=%s",
(ride, driver_id, departuretime, numberplate, maximum, ride_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def start_ride(ride_id, driver_id):
"""starts a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride = ride[0]
if int(ride[2]) == driver_id:
db_cursor.execute("UPDATE rides SET status=%s WHERE ride_id=%s", ("given", ride_id,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("taken", ride_id, True,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("rejected", ride_id, False,))
db.commit()
return {"message" : "ride has started"}
return {"message" : "The ride you want to start is not your ride."}
return {"message" : "ride does not exist"}
@staticmethod
def delete_ride(ride_id):
"""Deletes a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
for ride in rides:
if ride[0] == ride_id:
db_cursor.execute("DELETE FROM rides WHERE ride_id=%s", (ride_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_ride(ride_id):
"""Gets a particular ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride=ride[0]
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
return make_response(jsonify({"ride" : info}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_rides():
"""Gets all rides"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
all_rides = []
for ride in rides:
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
all_rides.append(info)
return make_response(jsonify({"All rides" : all_rides}), 200)
class Request:
"""Contains menu columns and methods to add, update and delete a request"""
def __init__(self, ride_id, user_id, accepted, status):
self.ride_id = str(ride_id)
self.user_id = str(user_id)
self.accepted = accepted
self.status = status
new_request = "INSERT INTO request (ride_id, user_id, accepted, status) VALUES " \
"('" + self.ride_id + "', '" + self.user_id + "', '" + '0' + "', '" + self.status + "')"
db_cursor = db.con()
db_cursor.execute(new_request)
db.commit()
@classmethod
def request_ride(cls, ride_id, user_id, accepted=False, status="pending"):
"""Creates a new request"""
db_cursor = db.con()
db_cursor.execute("SELECT status FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchone()
if ride[0] == "pending":
cls(ride_id, user_id, accepted, status)
return make_response(jsonify({"message" : "request has been successfully sent for approval"}), 201)
return make_response(jsonify({"message" : "ride is already given"}), 400)
@staticmethod
def delete_request(request_id):
"""Deletes a request"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM request WHERE request_id=%s", (request_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def accept_request(request_id):
"""Accepts request"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE request SET accepted=%s WHERE request_id=%s", (True, request_id))
db.commit()
return make_response(jsonify({"message" : "request has been successfully accepted"}), 200)
except KeyError:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def get_requests(request_id):
"""Gets a particular request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
if request != None:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
return make_response(jsonify({"request" : info}), 200)
return make_response(jsonify({"message" : "request does not exists"}), 404)
@staticmethod
def get_particular_riderequests(ride_id):
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE ride_id=%s", (ride_id,))
requests = db_cursor.fetchall()
if requests != []:
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_requests():
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request")
requests = db_cursor.fetchall()
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
class Relation:
"""Contains method to get driver_id and maximum from a requested ride"""
@staticmethod
def get_driver_id(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
ride_id = str(request[2])
db_cursor.execute("SELECT driver_id FROM rides WHERE ride_id=%s", (ride_id,))
driver_id = db_cursor.fetchone()
if driver_id == None:
return make_response(jsonify({"message" : "ride does not exists"}), 404)
driver_id = driver_id[0]
return int(driver_id)
@staticmethod
def get_maximum(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (str(request_id),))
request = db_cursor.fetchone()
db_cursor.execute("SELECT maximum FROM rides WHERE ride_id=%s", (request[2],))
maximum = db_cursor.fetchone()
maximum = maximum[0]
return maximum
| [
"flask.jsonify",
"databasesetup.db.commit",
"werkzeug.security.generate_password_hash",
"databasesetup.db.con"
]
| [((502, 551), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {'method': '"""sha256"""'}), "(password, method='sha256')\n", (524, 551), False, 'from werkzeug.security import generate_password_hash\n'), ((872, 880), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (878, 880), False, 'from databasesetup import db\n'), ((925, 936), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (934, 936), False, 'from databasesetup import db\n'), ((2063, 2071), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (2069, 2071), False, 'from databasesetup import db\n'), ((2629, 2637), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (2635, 2637), False, 'from databasesetup import db\n'), ((3722, 3730), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (3728, 3730), False, 'from databasesetup import db\n'), ((3775, 3786), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (3784, 3786), False, 'from databasesetup import db\n'), ((4879, 4887), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (4885, 4887), False, 'from databasesetup import db\n'), ((5768, 5776), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (5774, 5776), False, 'from databasesetup import db\n'), ((6326, 6334), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (6332, 6334), False, 'from databasesetup import db\n'), ((7058, 7066), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (7064, 7066), False, 'from databasesetup import db\n'), ((8135, 8143), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8141, 8143), False, 'from databasesetup import db\n'), ((8191, 8202), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (8200, 8202), False, 'from databasesetup import db\n'), ((8356, 8364), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8362, 8364), False, 'from databasesetup import db\n'), ((9854, 9862), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (9860, 9862), False, 'from databasesetup import db\n'), ((10500, 10508), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (10506, 10508), False, 'from databasesetup import db\n'), ((11280, 11288), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (11286, 11288), False, 'from databasesetup import db\n'), ((11992, 12000), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (11998, 12000), False, 'from databasesetup import db\n'), ((12574, 12582), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (12580, 12582), False, 'from databasesetup import db\n'), ((1111, 1119), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (1117, 1119), False, 'from databasesetup import db\n'), ((1321, 1332), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (1330, 1332), False, 'from databasesetup import db\n'), ((1647, 1655), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (1653, 1655), False, 'from databasesetup import db\n'), ((1748, 1759), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (1757, 1759), False, 'from databasesetup import db\n'), ((2484, 2528), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (2491, 2528), False, 'from flask import make_response, jsonify, current_app\n'), ((2992, 3025), 'flask.jsonify', 'jsonify', (["{'All users': all_users}"], {}), "({'All users': all_users})\n", (2999, 3025), False, 'from flask import make_response, jsonify, current_app\n'), ((4040, 4098), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully created'}"], {}), "({'message': 'ride has been successfully created'})\n", 
(4047, 4098), False, 'from flask import make_response, jsonify, current_app\n'), ((4305, 4313), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (4311, 4313), False, 'from databasesetup import db\n'), ((4560, 4571), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (4569, 4571), False, 'from databasesetup import db\n'), ((6171, 6215), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (6178, 6215), False, 'from flask import make_response, jsonify, current_app\n'), ((6905, 6949), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (6912, 6949), False, 'from flask import make_response, jsonify, current_app\n'), ((7581, 7614), 'flask.jsonify', 'jsonify', (["{'All rides': all_rides}"], {}), "({'All rides': all_rides})\n", (7588, 7614), False, 'from flask import make_response, jsonify, current_app\n'), ((8710, 8755), 'flask.jsonify', 'jsonify', (["{'message': 'ride is already given'}"], {}), "({'message': 'ride is already given'})\n", (8717, 8755), False, 'from flask import make_response, jsonify, current_app\n'), ((8889, 8897), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (8895, 8897), False, 'from databasesetup import db\n'), ((8998, 9009), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (9007, 9009), False, 'from databasesetup import db\n'), ((9362, 9370), 'databasesetup.db.con', 'db.con', ([], {}), '()\n', (9368, 9370), False, 'from databasesetup import db\n'), ((9487, 9498), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (9496, 9498), False, 'from databasesetup import db\n'), ((10360, 10407), 'flask.jsonify', 'jsonify', (["{'message': 'request does not exists'}"], {}), "({'message': 'request does not exists'})\n", (10367, 10407), False, 'from flask import make_response, jsonify, current_app\n'), ((11130, 11174), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (11137, 11174), False, 'from flask import make_response, jsonify, current_app\n'), ((11744, 11785), 'flask.jsonify', 'jsonify', (["{'ride_requests': ride_requests}"], {}), "({'ride_requests': ride_requests})\n", (11751, 11785), False, 'from flask import make_response, jsonify, current_app\n'), ((1366, 1424), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully updated'}"], {}), "({'message': 'user has been successfully updated'})\n", (1373, 1424), False, 'from flask import make_response, jsonify, current_app\n'), ((1793, 1851), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully deleted'}"], {}), "({'message': 'user has been successfully deleted'})\n", (1800, 1851), False, 'from flask import make_response, jsonify, current_app\n'), ((2421, 2447), 'flask.jsonify', 'jsonify', (["{'profile': info}"], {}), "({'profile': info})\n", (2428, 2447), False, 'from flask import make_response, jsonify, current_app\n'), ((4605, 4663), 'flask.jsonify', 'jsonify', (["{'message': 'user has been successfully updated'}"], {}), "({'message': 'user has been successfully updated'})\n", (4612, 4663), False, 'from flask import make_response, jsonify, current_app\n'), ((5468, 5479), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (5477, 5479), False, 'from databasesetup import db\n'), ((6026, 6037), 'databasesetup.db.commit', 'db.commit', ([], {}), '()\n', (6035, 6037), False, 'from databasesetup import db\n'), ((6845, 6868), 'flask.jsonify', 'jsonify', (["{'ride': info}"], 
{}), "({'ride': info})\n", (6852, 6868), False, 'from flask import make_response, jsonify, current_app\n'), ((8602, 8673), 'flask.jsonify', 'jsonify', (["{'message': 'request has been successfully sent for approval'}"], {}), "({'message': 'request has been successfully sent for approval'})\n", (8609, 8673), False, 'from flask import make_response, jsonify, current_app\n'), ((9044, 9102), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully deleted'}"], {}), "({'message': 'ride has been successfully deleted'})\n", (9051, 9102), False, 'from flask import make_response, jsonify, current_app\n'), ((9532, 9594), 'flask.jsonify', 'jsonify', (["{'message': 'request has been successfully accepted'}"], {}), "({'message': 'request has been successfully accepted'})\n", (9539, 9594), False, 'from flask import make_response, jsonify, current_app\n'), ((10297, 10323), 'flask.jsonify', 'jsonify', (["{'request': info}"], {}), "({'request': info})\n", (10304, 10323), False, 'from flask import make_response, jsonify, current_app\n'), ((11052, 11093), 'flask.jsonify', 'jsonify', (["{'ride_requests': ride_requests}"], {}), "({'ride_requests': ride_requests})\n", (11059, 11093), False, 'from flask import make_response, jsonify, current_app\n'), ((12351, 12395), 'flask.jsonify', 'jsonify', (["{'message': 'ride does not exists'}"], {}), "({'message': 'ride does not exists'})\n", (12358, 12395), False, 'from flask import make_response, jsonify, current_app\n'), ((1481, 1524), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exist'}"], {}), "({'message': 'user does not exist'})\n", (1488, 1524), False, 'from flask import make_response, jsonify, current_app\n'), ((1908, 1952), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exists'}"], {}), "({'message': 'user does not exists'})\n", (1915, 1952), False, 'from flask import make_response, jsonify, current_app\n'), ((4720, 4763), 'flask.jsonify', 'jsonify', (["{'message': 'user does not exist'}"], {}), "({'message': 'user does not exist'})\n", (4727, 4763), False, 'from flask import make_response, jsonify, current_app\n'), ((6076, 6134), 'flask.jsonify', 'jsonify', (["{'message': 'ride has been successfully deleted'}"], {}), "({'message': 'ride has been successfully deleted'})\n", (6083, 6134), False, 'from flask import make_response, jsonify, current_app\n'), ((9159, 9231), 'flask.jsonify', 'jsonify', (["{'message': 'the specified request does not exist in requests'}"], {}), "({'message': 'the specified request does not exist in requests'})\n", (9166, 9231), False, 'from flask import make_response, jsonify, current_app\n'), ((9660, 9732), 'flask.jsonify', 'jsonify', (["{'message': 'the specified request does not exist in requests'}"], {}), "({'message': 'the specified request does not exist in requests'})\n", (9667, 9732), False, 'from flask import make_response, jsonify, current_app\n')] |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
class NOCBoundField(forms.forms.BoundField):
"""
Bound field with django-admin like label-tag
"""
def __init__(self, *args, **kwargs):
super(NOCBoundField, self).__init__(*args, **kwargs)
self.is_checkbox = isinstance(self.field.widget, forms.CheckboxInput)
def label_tag(self, contents=None, attrs=None):
if not contents:
contents = force_unicode(
escape(self.field.label if self.field.label else self.name)
) + (":" if not self.is_checkbox else "")
classes = []
if self.is_checkbox:
classes += ["vCheckboxLabel"]
if self.field.required:
classes += ["required"]
if classes:
attrs = attrs.copy() if attrs else {}
attrs["class"] = " ".join(classes)
return super(NOCBoundField, self).label_tag(contents=contents, attrs=attrs)
class NOCForm(forms.Form):
"""
Form wrapper returning NOCBoundField items
"""
class Media(object):
css = {"all": ["/ui/pkg/django-media/admin/css/forms.css"]}
def __init__(self, *args, **kwargs):
super(NOCForm, self).__init__(*args, **kwargs)
self.disabled_fields = set()
def disable_field(self, name):
self.disabled_fields.add(name)
def __iter__(self):
for name, field in six.iteritems(self.fields):
if name not in self.disabled_fields:
yield NOCBoundField(self, field, name)
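# --- Usage sketch (illustrative; the form and its fields are assumptions, not part of this module) ---
# class UserForm(NOCForm):
#     name = forms.CharField()
#     is_active = forms.BooleanField(required=False)
#
# form = UserForm()
# form.disable_field("is_active")
# labels = [bf.label_tag() for bf in form]  # iteration yields NOCBoundField items and skips disabled fields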
| [
"six.iteritems",
"django.utils.html.escape"
]
| [((1815, 1841), 'six.iteritems', 'six.iteritems', (['self.fields'], {}), '(self.fields)\n', (1828, 1841), False, 'import six\n'), ((893, 952), 'django.utils.html.escape', 'escape', (['(self.field.label if self.field.label else self.name)'], {}), '(self.field.label if self.field.label else self.name)\n', (899, 952), False, 'from django.utils.html import escape\n')] |
import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
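# Note: reusing one module-level HttpResponse across requests can be fragile (middleware may mutate it);
# constructing a fresh response per request is the more common pattern.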
def unit_json_list(request):
    ''' List JSON view for locally available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
    ''' List JSON view for available alliance units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
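# --- Illustrative URL wiring (paths are assumptions, not taken from the original project) ---
# from django.urls import path
# urlpatterns = [
#     path('units/json/', unit_json_list),
#     path('units/json/<int:id_unit>/', detail_unit_json),
#     path('units/alliance/json/', alliance_unit_json_list),
# ]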
| [
"django.core.serializers.serialize",
"json.loads",
"json.dumps",
"django.http.HttpResponse"
]
| [((220, 256), 'json.dumps', 'json.dumps', (["{'error': 'Bad Request'}"], {}), "({'error': 'Bad Request'})\n", (230, 256), False, 'import json\n'), ((561, 577), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (571, 577), False, 'import json\n'), ((1257, 1316), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', 'unit'], {'fields': 'UNIT_LIST_FIELD'}), "('json', unit, fields=UNIT_LIST_FIELD)\n", (1278, 1316), False, 'from django.core import serializers\n'), ((1361, 1377), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1371, 1377), False, 'import json\n'), ((1889, 1952), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""', 'status': '(200)'}), "(data, content_type='application/json', status=200)\n", (1901, 1952), False, 'from django.http import HttpResponse\n'), ((838, 859), 'json.dumps', 'json.dumps', (['_raw_data'], {}), '(_raw_data)\n', (848, 859), False, 'import json\n'), ((1510, 1531), 'json.dumps', 'json.dumps', (['_raw_data'], {}), '(_raw_data)\n', (1520, 1531), False, 'import json\n'), ((1149, 1194), 'json.dumps', 'json.dumps', (["{'error': 'Unidad no encontrada'}"], {}), "({'error': 'Unidad no encontrada'})\n", (1159, 1194), False, 'import json\n')] |
'''
ex029: Write a program that reads a car's speed. If it exceeds 80 km/h, show a message saying the driver was fined. The fine is R$ 7.00 for every km/h over the limit.
'''
from colorise import set_color, reset_color
cor = {
'limpa':'\033[m',
'white':'\033[1;97m'
}
set_color(fg='green')
velocidade_carro = int(input('Informe a velocidade do carro KM/H: '))
if velocidade_carro > 80:
multa = (velocidade_carro - 80) * 7.00
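    # e.g. 100 km/h -> (100 - 80) * 7.00 = R$ 140.00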
print('\nMULTADO! VOCÊ ULTRAPASSOU O LIMITE PERMITIDO. LOGO TERÁ QUE PAGAR ', end='')
reset_color()
print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
set_color(fg='green')
print('\nCONTINUE ASSIM. DIRIGINDO COM SEGURANÇA!')
| [
"colorise.reset_color",
"colorise.set_color"
]
| [((306, 327), 'colorise.set_color', 'set_color', ([], {'fg': '"""green"""'}), "(fg='green')\n", (315, 327), False, 'from colorise import set_color, reset_color\n'), ((561, 574), 'colorise.reset_color', 'reset_color', ([], {}), '()\n', (572, 574), False, 'from colorise import set_color, reset_color\n'), ((653, 674), 'colorise.set_color', 'set_color', ([], {'fg': '"""green"""'}), "(fg='green')\n", (662, 674), False, 'from colorise import set_color, reset_color\n')] |
#!/usr/bin/env python
from setuptools import setup, find_packages
from pymemcache import __version__
setup(
name = 'pymemcache',
version = __version__,
author = '<NAME>',
author_email = '<EMAIL>',
packages = find_packages(),
tests_require = ['nose>=1.0'],
install_requires = ['six'],
description = 'A comprehensive, fast, pure Python memcached client',
long_description = open('README.md').read(),
license = 'Apache License 2.0',
url = 'https://github.com/Pinterest/pymemcache',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Database',
],
)
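# Illustrative install commands (not part of the original file):
#   pip install .            # build and install from this source tree
#   pip install pymemcache   # released package from PyPI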
| [
"setuptools.find_packages"
]
| [((231, 246), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (244, 246), False, 'from setuptools import setup, find_packages\n')] |
import torch
from plyfile import PlyData
from torch_geometric.data import Data
def read_ply(path):
with open(path, 'rb') as f:
data = PlyData.read(f)
pos = ([torch.tensor(data['vertex'][axis]) for axis in ['x', 'y', 'z']])
pos = torch.stack(pos, dim=-1)
face = None
if 'face' in data:
faces = data['face']['vertex_indices']
faces = [torch.tensor(face, dtype=torch.long) for face in faces]
face = torch.stack(faces, dim=-1)
data = Data(pos=pos)
data.face = face
return data
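# --- Usage sketch (illustrative; the file name is an assumption) ---
# mesh = read_ply("example.ply")
# mesh.pos.shape    # [num_vertices, 3]
# mesh.face.shape   # [3, num_faces] when the file stores faces; otherwise mesh.face is None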
| [
"plyfile.PlyData.read",
"torch.tensor",
"torch.stack",
"torch_geometric.data.Data"
]
| [((252, 276), 'torch.stack', 'torch.stack', (['pos'], {'dim': '(-1)'}), '(pos, dim=-1)\n', (263, 276), False, 'import torch\n'), ((491, 504), 'torch_geometric.data.Data', 'Data', ([], {'pos': 'pos'}), '(pos=pos)\n', (495, 504), False, 'from torch_geometric.data import Data\n'), ((148, 163), 'plyfile.PlyData.read', 'PlyData.read', (['f'], {}), '(f)\n', (160, 163), False, 'from plyfile import PlyData\n'), ((177, 211), 'torch.tensor', 'torch.tensor', (["data['vertex'][axis]"], {}), "(data['vertex'][axis])\n", (189, 211), False, 'import torch\n'), ((452, 478), 'torch.stack', 'torch.stack', (['faces'], {'dim': '(-1)'}), '(faces, dim=-1)\n', (463, 478), False, 'import torch\n'), ((381, 417), 'torch.tensor', 'torch.tensor', (['face'], {'dtype': 'torch.long'}), '(face, dtype=torch.long)\n', (393, 417), False, 'import torch\n')] |
from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
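        # step_result.action_mask marks unavailable actions with True; "1 - mask" converts this to 1 = available, 0 = masked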
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
| [
"mlagents.trainers.brain.CameraResolution",
"numpy.sum",
"numpy.zeros",
"mlagents.envs.exception.UnityEnvironmentException",
"numpy.concatenate"
]
| [((990, 1031), 'numpy.zeros', 'np.zeros', (['(n_agents, 0)'], {'dtype': 'np.float32'}), '((n_agents, 0), dtype=np.float32)\n', (998, 1031), True, 'import numpy as np\n'), ((1060, 1129), 'numpy.concatenate', 'np.concatenate', (['[step_result.obs[i] for i in vec_obs_indices]'], {'axis': '(1)'}), '([step_result.obs[i] for i in vec_obs_indices], axis=1)\n', (1074, 1129), True, 'import numpy as np\n'), ((2261, 2295), 'mlagents.trainers.brain.CameraResolution', 'CameraResolution', (['s[0]', 's[1]', 's[2]'], {}), '(s[0], s[1], s[2])\n', (2277, 2295), False, 'from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution\n'), ((1220, 1250), 'numpy.sum', 'np.sum', (['group_spec.action_size'], {}), '(group_spec.action_size)\n', (1226, 1250), True, 'import numpy as np\n'), ((749, 898), 'mlagents.envs.exception.UnityEnvironmentException', 'UnityEnvironmentException', (['"""Invalid input received from the environment, the observation should either be a vector of float or a PNG image"""'], {}), "(\n 'Invalid input received from the environment, the observation should either be a vector of float or a PNG image'\n )\n", (774, 898), False, 'from mlagents.envs.exception import UnityEnvironmentException\n'), ((1358, 1401), 'numpy.sum', 'np.sum', (['group_spec.discrete_action_branches'], {}), '(group_spec.discrete_action_branches)\n', (1364, 1401), True, 'import numpy as np\n'), ((1502, 1549), 'numpy.concatenate', 'np.concatenate', (['step_result.action_mask'], {'axis': '(1)'}), '(step_result.action_mask, axis=1)\n', (1516, 1549), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
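# Note: scipy.misc.toimage only exists in older SciPy releases; it was deprecated and later removed from scipy.misc.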
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
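    # e.g. contrast_level = 0.1 maps pixel values in [0, 1] to [0.45, 0.55] around mid-grey (0.5)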
return (1-contrast_level)/2.0 + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
return np.all(np.logical_and(mat >= 0, mat <= 1))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to graycale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
| [
"skimage.color.rgb2gray",
"numpy.logical_and",
"numpy.where",
"scipy.misc.toimage",
"skimage.io.imread",
"skimage.io.imsave",
"numpy.random.uniform",
"wrapper.data_to_pic",
"numpy.random.RandomState"
]
| [((2702, 2731), 'numpy.where', 'np.where', (['(image < 0)', '(0)', 'image'], {}), '(image < 0, 0, image)\n', (2710, 2731), True, 'import numpy as np\n'), ((2744, 2773), 'numpy.where', 'np.where', (['(image > 1)', '(1)', 'image'], {}), '(image > 1, 1, image)\n', (2752, 2773), True, 'import numpy as np\n'), ((5095, 5108), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (5103, 5108), False, 'from skimage.color import rgb2gray\n'), ((5972, 6002), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (5993, 6002), True, 'import numpy as np\n'), ((397, 409), 'skimage.io.imread', 'imread', (['path'], {}), '(path)\n', (403, 409), False, 'from skimage.io import imread, imsave\n'), ((537, 569), 'skimage.io.imsave', 'imsave', (["(imgname + '.JPEG')", 'image'], {}), "(imgname + '.JPEG', image)\n", (543, 569), False, 'from skimage.io import imread, imsave\n'), ((1471, 1486), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (1479, 1486), False, 'from skimage.color import rgb2gray\n'), ((3309, 3365), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': '(nrow, ncol)'}), '(low=low, high=high, size=(nrow, ncol))\n', (3326, 3365), True, 'import numpy as np\n'), ((3749, 3783), 'numpy.logical_and', 'np.logical_and', (['(mat >= 0)', '(mat <= 1)'], {}), '(mat >= 0, mat <= 1)\n', (3763, 3783), True, 'import numpy as np\n'), ((4394, 4415), 'wrapper.data_to_pic', 'wr.data_to_pic', (['image'], {}), '(image)\n', (4408, 4415), True, 'import wrapper as wr\n'), ((587, 621), 'scipy.misc.toimage', 'toimage', (['image'], {'cmin': '(0.0)', 'cmax': '(1.0)'}), '(image, cmin=0.0, cmax=1.0)\n', (594, 621), False, 'from scipy.misc import toimage\n')] |
from django.db.models import fields
from main.models import RoomReservation, UserRoom
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login
from django.contrib.auth import get_user_model
class ReservateRoomForm(forms.Form):
begin_date = forms.DateField()
end_date = forms.DateField()
class AddCommentForm(forms.Form):
text = forms.CharField(max_length=410)
accommodation = forms.ModelChoiceField(queryset=UserRoom.objects.all())
class EditReservationForm(forms.ModelForm):
class Meta:
model = RoomReservation
fields = ['begin_date', 'end_date']
| [
"django.forms.DateField",
"main.models.UserRoom.objects.all",
"django.forms.CharField"
]
| [((317, 334), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (332, 334), False, 'from django import forms\n'), ((350, 367), 'django.forms.DateField', 'forms.DateField', ([], {}), '()\n', (365, 367), False, 'from django import forms\n'), ((414, 445), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(410)'}), '(max_length=410)\n', (429, 445), False, 'from django import forms\n'), ((498, 520), 'main.models.UserRoom.objects.all', 'UserRoom.objects.all', ([], {}), '()\n', (518, 520), False, 'from main.models import RoomReservation, UserRoom\n')] |
# coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
A `CNPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
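# --- Usage sketch (illustrative; assumes TF1-style graph execution) ---
# dataset_train = GPCurvesReader(batch_size=16, max_num_context=10)
# data_train = dataset_train.generate_curves()
# with tf.Session() as sess:
#     batch = sess.run(data_train)  # an NPRegressionDescription of numpy arrays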
| [
"tensorflow.eye",
"collections.namedtuple",
"tensorflow.shape",
"tensorflow.random_normal",
"tensorflow.ones",
"tensorflow.reduce_sum",
"tensorflow.range",
"tensorflow.random_uniform",
"tensorflow.gather",
"tensorflow.squeeze",
"tensorflow.square",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.exp"
]
| [((871, 976), 'collections.namedtuple', 'collections.namedtuple', (['"""NPRegressionDescription"""', "('context_x', 'context_y', 'target_x', 'target_y')"], {}), "('NPRegressionDescription', ('context_x', 'context_y',\n 'target_x', 'target_y'))\n", (893, 976), False, 'import collections\n'), ((3274, 3303), 'tensorflow.expand_dims', 'tf.expand_dims', (['xdata'], {'axis': '(1)'}), '(xdata, axis=1)\n', (3288, 3303), True, 'import tensorflow as tf\n'), ((3353, 3382), 'tensorflow.expand_dims', 'tf.expand_dims', (['xdata'], {'axis': '(2)'}), '(xdata, axis=2)\n', (3367, 3382), True, 'import tensorflow as tf\n'), ((3571, 3630), 'tensorflow.square', 'tf.square', (['(diff[:, None, :, :, :] / l1[:, :, None, None, :])'], {}), '(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n', (3580, 3630), True, 'import tensorflow as tf\n'), ((3643, 3666), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['norm', '(-1)'], {}), '(norm, -1)\n', (3656, 3666), True, 'import tensorflow as tf\n'), ((3204, 3219), 'tensorflow.shape', 'tf.shape', (['xdata'], {}), '(xdata)\n', (3212, 3219), True, 'import tensorflow as tf\n'), ((3837, 3856), 'tensorflow.exp', 'tf.exp', (['(-0.5 * norm)'], {}), '(-0.5 * norm)\n', (3843, 3856), True, 'import tensorflow as tf\n'), ((3955, 3979), 'tensorflow.eye', 'tf.eye', (['num_total_points'], {}), '(num_total_points)\n', (3961, 3979), True, 'import tensorflow as tf\n'), ((4359, 4447), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(3)', 'maxval': 'self._max_num_context', 'dtype': 'tf.int32'}), '(shape=[], minval=3, maxval=self._max_num_context, dtype=\n tf.int32)\n', (4376, 4447), True, 'import tensorflow as tf\n'), ((4816, 4849), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_values'], {'axis': '(-1)'}), '(x_values, axis=-1)\n', (4830, 4849), True, 'import tensorflow as tf\n'), ((4980, 5081), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(self._max_num_context - num_context)', 'dtype': 'tf.int32'}), '(shape=(), minval=0, maxval=self._max_num_context -\n num_context, dtype=tf.int32)\n', (4997, 5081), True, 'import tensorflow as tf\n'), ((5219, 5295), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, num_total_points, self._x_size]', '(-2)', '(2)'], {}), '([self._batch_size, num_total_points, self._x_size], -2, 2)\n', (5236, 5295), True, 'import tensorflow as tf\n'), ((5452, 5543), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, self._y_size, self._x_size]', '(0.1)', 'self._l1_scale'], {}), '([self._batch_size, self._y_size, self._x_size], 0.1, self\n ._l1_scale)\n', (5469, 5543), True, 'import tensorflow as tf\n'), ((5585, 5660), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self._batch_size, self._y_size]', '(0.1)', 'self._sigma_scale'], {}), '([self._batch_size, self._y_size], 0.1, self._sigma_scale)\n', (5602, 5660), True, 'import tensorflow as tf\n'), ((6441, 6512), 'tensorflow.random_normal', 'tf.random_normal', (['[self._batch_size, self._y_size, num_total_points, 1]'], {}), '([self._batch_size, self._y_size, num_total_points, 1])\n', (6457, 6512), True, 'import tensorflow as tf\n'), ((6588, 6611), 'tensorflow.squeeze', 'tf.squeeze', (['y_values', '(3)'], {}), '(y_values, 3)\n', (6598, 6611), True, 'import tensorflow as tf\n'), ((6829, 6875), 'tensorflow.gather', 'tf.gather', (['x_values', 'idx[:num_context]'], {'axis': '(1)'}), '(x_values, idx[:num_context], axis=1)\n', (6838, 6875), True, 'import tensorflow as tf\n'), ((6894, 6940), 
'tensorflow.gather', 'tf.gather', (['y_values', 'idx[:num_context]'], {'axis': '(1)'}), '(y_values, idx[:num_context], axis=1)\n', (6903, 6940), True, 'import tensorflow as tf\n'), ((3798, 3816), 'tensorflow.square', 'tf.square', (['sigma_f'], {}), '(sigma_f)\n', (3807, 3816), True, 'import tensorflow as tf\n'), ((5776, 5837), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[self._batch_size, self._y_size, self._x_size]'}), '(shape=[self._batch_size, self._y_size, self._x_size])\n', (5783, 5837), True, 'import tensorflow as tf\n'), ((5897, 5944), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[self._batch_size, self._y_size]'}), '(shape=[self._batch_size, self._y_size])\n', (5904, 5944), True, 'import tensorflow as tf\n'), ((6277, 6304), 'tensorflow.cast', 'tf.cast', (['kernel', 'tf.float64'], {}), '(kernel, tf.float64)\n', (6284, 6304), True, 'import tensorflow as tf\n'), ((6789, 6809), 'tensorflow.range', 'tf.range', (['num_target'], {}), '(num_target)\n', (6797, 6809), True, 'import tensorflow as tf\n'), ((4710, 4758), 'tensorflow.range', 'tf.range', (['(-2.0)', '(2.0)', '(1.0 / 100)'], {'dtype': 'tf.float32'}), '(-2.0, 2.0, 1.0 / 100, dtype=tf.float32)\n', (4718, 4758), True, 'import tensorflow as tf\n')] |
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples =[], min_pho = 16, max_pho = 30, subgraph_size = 8, max_count = 2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
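        # Note: 'pl_graph' below is assumed to be defined elsewhere (e.g. a module-level
        # NetworkX graph built from sample data); it is not defined in this snippet.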
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
            N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
eng = eng
results =eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
| [
"strawberryfields.Program",
"strawberryfields.ops.BSgate",
"strawberryfields.ops.MZgate",
"strawberryfields.ops.MeasureFock",
"strawberryfields.utils.random_interferometer",
"strawberryfields.apps.subgraph.search",
"numpy.sum",
"strawberryfields.ops.Interferometer",
"numpy.min",
"strawberryfields.ops.S2gate",
"strawberryfields.ops.Rgate"
]
| [((621, 700), 'strawberryfields.apps.subgraph.search', 'subgraph.search', (['samples', 'pl_graph', 'subgraph_size', 'min_pho'], {'max_count': 'max_count'}), '(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)\n', (636, 700), False, 'from strawberryfields.apps import data, sample, subgraph, plot\n'), ((1212, 1221), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1218, 1221), True, 'import numpy as np\n'), ((1359, 1403), 'strawberryfields.Program', 'sf.Program', (['num_subsystem'], {'name': '"""remote_job"""'}), "(num_subsystem, name='remote_job')\n", (1369, 1403), True, 'import strawberryfields as sf\n'), ((1416, 1440), 'strawberryfields.utils.random_interferometer', 'random_interferometer', (['(4)'], {}), '(4)\n', (1437, 1440), False, 'from strawberryfields.utils import random_interferometer\n'), ((1571, 1586), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1581, 1586), False, 'from strawberryfields import ops\n'), ((1614, 1629), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1624, 1629), False, 'from strawberryfields import ops\n'), ((1657, 1672), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1667, 1672), False, 'from strawberryfields import ops\n'), ((1756, 1777), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (1774, 1777), False, 'from strawberryfields import ops\n'), ((1817, 1841), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (1827, 1841), False, 'from strawberryfields import ops\n'), ((1869, 1885), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (1878, 1885), False, 'from strawberryfields import ops\n'), ((1905, 1928), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (1915, 1928), False, 'from strawberryfields import ops\n'), ((2018, 2039), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (2036, 2039), False, 'from strawberryfields import ops\n'), ((2079, 2103), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (2089, 2103), False, 'from strawberryfields import ops\n'), ((2131, 2147), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (2140, 2147), False, 'from strawberryfields import ops\n'), ((2167, 2190), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (2177, 2190), False, 'from strawberryfields import ops\n'), ((2219, 2236), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (2234, 2236), False, 'from strawberryfields import ops\n'), ((1163, 1172), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1169, 1172), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
suplerclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data.
Examples
========
Here we initiate a base class after mixing in the data getter. The first line of
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
class Observation(object):
"""
superclass for a data structure and methods
Attributes
==========
aliases - (dict) data keys to replace those in original data
channel - (dict) signal paths, e.g., different freqs and pols
data - (dict) original data, e.g., read from file or database
DOY - (int) day of year of observation
end - (float) UNIX time at the end
latitude - (float) from obs
logger - (logging.Logger)
longitude - (float) from obs
name - (str) user assigned, defaults to YEAR/DOY
numdata - (int) number of data samples
obs - (AE.DSS) observatory
session - (Session) set of observations, parent to Observation
session_path - (str) directory for session files
start - (float) UNIX time at the beginning
year - (int) year of observation
**Reserved Column Names**
These column names are recognized. They are also the keys for attribute
``data``.
These quantities must be present in some form::
unixtime (float) UNIX time in sec
chan_name (str) channel name
integr (float) integration (exposure) in sec
azel (float,float) azimuth and elevation in decimal deg
power (float) power level if only a single channel
Optional::
diode (float) 0 or power in K (integers OK)
level (float) (unidentified -- in ``tlog`` table)
cryotemp (float) cryostat temp in K
windspeed (float) km/hr
winddir (float) deg
ambtemp (float) deg C
pressure (float) mbar
Columns to be computed::
mpldatenum (float) matplotlib ``datenum``
Alternative for ``power``::
tsys (float) system temperature (calibrated power)
top (float) alternative for ``tsys`` (used in DSN)
vfc_counts (int) VFC counts (rate times ``integr``)
Any column with a name which is not a reserved name is assumed to be
power-like data from the channel with that name, unless that name is in a
list provided to the argument ``ignore`` in the method ``get_data_channels``
of the class ``DataGetterMixin``.
Alternative for ``unixtime``::
year (int) year of observation
doy (int) day of year
utc (str) HH:MM:SS
timestr (str) something like 2020/06/14/14:22:21.00
Alternative for ``chan_name``::
chan (int) index in receiver channel names
Alternative for ``azel``::
radec (float,float) precessed right ascension in decimal hours and
precessed declination in decimal deg
radec1950 (float,float) mean right ascension in decimal hours and
mean declination in decimal deg at epoch
radec2000 (float,float) mean right ascension in decimal hours and
mean declination at epoch in decimal deg
az (float) azimuth in decimal deg
el (float) elevation in decimal deg
ra (float) precessed right ascension in decimal hours
dec (float) precessed declination in decimal deg
ra1950 (float) mean right ascension in decimal hours at epoch
dec1950 (float) mean declination in decimal deg at epoch
ra2000 (float) mean right ascension in decimal hours at epoch
dec2000 (float) mean declination in decimal deg at epoch
Notes
=====
* The ``data`` structure is a dict.
* The value of a ``data`` item is either a numpy array or a object
like ``float``, ``int``, or ``str``.
* The keys have reserved words defined above and will be lowercase.
* Items with other keys may be added, typically by a child class.
* Coordinates shall be in pairs, `e.g. ``azel``, ``radec``. (This way you
never get one without the other.)
"""
reserved = ['unixtime','chan_name','integr','az','el','year','doy','utc',
'timestr','chan','tsys','top','diode','level','cryotemp',
'windspeed','winddir','ambtemp','pressure',
'ra','dec','ra1950','dec1950','ra2000','dec2000']
power_keys = ['tsys', 'top', 'vfc_counts', 'power']
def __init__(self, parent=None, name=None, dss=None,
date=None, project=None):
"""
Create a base Observation object.
This is not meant to be initialized by itself. A subclass generally
determines how data are read in. However, method ``initialize()``
provides a basic data read capability using ``numpy.genfromtxt()``
and creates the object's data structure.
Args:
parent (Session): session to which this observation belongs
name (str): an identifier; default is station ID + "obs"
dss (int): station number
date (str): "YEAR/DOY"
project (str): directory under /usr/local/projects
"""
self.logger = logging.getLogger(logger.name+".Observation")
self.session = parent
# observatory must be specified
if dss:
self.obs = coords.DSS(dss)
self.longitude = self.obs.long*180/math.pi # deg
self.latitude = self.obs.lat*180/math.pi # deg
else:
self.logger.error("__init__: requires observatory location")
raise Exception("Where were the data taken?")
# give the object a name
if name:
self.name = name
else:
self.name = "DSS"+str(dss)+"obs"
self.logger = logging.getLogger(logger.name+".Observation")
# the observation was part of some project
if project:
self.project = project
else:
self.logger.error("__init__: requires a project")
raise Exception("Where are the session's working files?")
# the observation was done on some date
if date:
y,d = date.split('/')
self.year = int(y);
self.DOY = int(d)
projdatapath, self.sessionpath, rawdatapath = \
get_obs_dirs(project, dss, self.year, self.DOY,
datafmt=None)
self.logger.debug("__init__: session path: %s", self.sessionpath)
else:
self.logger.error("__init__: requires a date")
raise Exception("When were the date taken?")
# accomodate subclass arguments
self.aliases = {}
# what I really want to do here is see if this was called by a subclass,
# in which case I do not try to get the channel info until this
# initialization has finished.
#
#if hasattr(self, "get_data_channels"):
# channels = self, get_data_channels()
# self.make_channels(channels)
#else:
# self.logger.info("__init__: initialize() may now be called")
def splitkey(self, longlat):
"""
Checks for presence of coordinates in pairs or singles
@param longlat : "azel", or "radec", or "radecEPOC"
@type longlat : str
"""
longitude = longlat[:2] # 'az' or 'ra'
if len(longlat) > 5: # has epoch
epoch = longlat[-4:]
longitude += epoch
latitude = longlat[2:-4]+epoch
else: # date of observation
latitude = longlat[2:]
epoch = None
return longitude, latitude, epoch
def check_for(self, data, longlat):
"""
Checks for separate coordinates and splits if coord pairs
Args:
data (dict): attribute ``data``
longlat (str): "azel", or "radec", or "radecEPOC"
"""
longitude, latitude, epoch = self.splitkey(longlat)
if longitude in data.dtype.names and \
latitude in data.dtype.names:
self.logger.debug("check_for: data has %s and %s", longitude, latitude)
self.data[longitude] = data[longitude]
self.data[latitude] = data[latitude]
return True
elif longlat in data.dtype.names:
self.logger.debug("check_for: data has %s", longlat)
      # unzip the coordinate pairs into separate longitude and latitude columns
      self.data[longitude], self.data[latitude] = zip(*data[longlat])
self.logger.debug("check_for: added %s and %s to data",
longitude, latitude)
return True
else:
# coords need to be computed from other coords
return False
def unpack_to_complex(self, rawdata):
"""
Converts a sequence of alternating real/imag samples to complex
@param rawdata : alternating real and imaginary bytes
@type rawdata : numpy array of signed int8
@return: numpy array of complex
"""
datalen = len(rawdata)
real = rawdata[0:datalen:2]
imag = rawdata[1:datalen:2]
data = real + 1j*imag
return data
def sideband_separate(self, data):
"""
Converts a complex spectrum array and returns two reals with USB and LSB
This applies a Hilbert transform to the complex data.
"""
usb = (data.real + scipy.fftpack.hilbert(data).imag)
lsb = (scipy.fftpack.hilbert(data).real + data.imag)
return lsb,usb
class Channel(support.PropertiedClass):
"""
Class for a signal path
"""
def __init__(self, parent, name, freq=None, bw=None, pol=None, IFtype=None,
atten=None):
"""
Notes
=====
The properties can be accessed as if the class were a dict.
Arguments
=========
freq:float or int: center frequency in MHz
bw:float or int: bandwidth in MHz
pol:str: polarization code
"""
support.PropertiedClass.__init__(self)
self.parent = parent
self.logger = logging.getLogger(self.parent.name+".Channel")
self.logger.debug("__init__: created %s", self.logger.name)
self.logger.debug("__init__: parent is %s", self.parent)
self.name = name
self.data['freq'] = freq
self.data['bw'] = bw
self.data['pol'] = pol
self.data['ifmode'] = IFtype
self.data['atten'] = atten
class DataGetterMixin(object):
"""
Class for getting data from a CSV file.
"""
def initialize(self, filename, delimiter=" ", names=True, skip_header=0,
source=None):
"""
Get the data and make a data structure for the observations.
This is not included by default in ``__init__()`` to keep it simple for
subclasses.
Args:
filename (str): name only, required; the path is provided
delimiter (str): what separates the columns
names (bool): the first line has column names
skip_header (int) : number of rows to skip
"""
# get the data
data = self.open_datafile(filename, delimiter=delimiter, names=names,
skip_header=skip_header)
# get the signal columns and names
metadata, signals = self.get_data_channels(data)
# create Channel objects for the signal properties
self.make_channels(signals)
# create the data structure
self.make_data_struct(data, metadata, signals)
# compute the offsets from the source center for each data point
if source:
self.get_offsets(source=source)
else:
self.logger.warning("initialize: no source specified; no offsets")
def open_datafile(self, filename, delimiter=" ", names=True, skip_header=0):
"""
Opens and reads a data file
This is used by ``Malargue`` (one data files) and ``GAVRT`` (one data file
for each signal).
Args:
filename (str): text data file name
delimiter (str): separator between columns (default: whitespace)
names (bool): file row has column names (default: True)
skip_header (int): number of rows to skip at beginning of file
Returns:
ndarray:
"""
data = NP.genfromtxt(self.sessionpath+filename,
delimiter=delimiter,
dtype=None,
names=names,
case_sensitive='lower',
skip_header=skip_header,
encoding=None)
return data
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
    ``ignore`` are ignored. Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
if ignore:
if name in ignore:
          continue  # skip columns the caller asked to ignore
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
def make_data_struct(self, data, metadata, signals):
"""
Takes a text table with headers and converts it into a numpy ``ndarray``.
That means that a column can be extracted using `data[label]`.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
        The prop keys are "freq", "bw", "pol", "IFtype", and "atten".
Args:
props (dict of dicts): signal channel properties.
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
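    # Example of a ``props`` dict (channel names and values are hypothetical):
    #   props = {'XL': {'freq': 8420., 'bw': 10., 'pol': 'L', 'IFtype': 'U', 'atten': 0},
    #            'XR': {'freq': 8420., 'bw': 10., 'pol': 'R', 'IFtype': 'U', 'atten': 0}}
    #   obs.make_channels(['XL', 'XR'], props=props)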
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
It is expected that the parent class is a subclass of ``Observation`` already
by virtue of it being a superclass of subclass which inherits these methods.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
The sequences may repeat in a sawtooth-like series. The number of
'xdec' and 'dec' points is multiple times the gridsize.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
regular grid if the original positions were not exact, i.e., pointing error.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
if step == None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
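    # Typical calls (a sketch; ``obsmap`` is hypothetical and assumes the offsets
    # computed by ``get_offsets`` already exist):
    #   obsmap.regrid(width=1.0, height=1.0)     # infer the step size from the data
    #   obsmap.regrid(step=(0.01, 0.01))         # force a 0.01 deg grid in X and Y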
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
            RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
            ra = self.data['ra'][idx]
            dec = self.data['dec'][idx]
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
        self.data['dec2000'] = decs2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec. from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS
ALSO.
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
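    # Example (the Sun is the default source; ``obsmap`` is a hypothetical instance):
    #   obsmap.get_offsets(source="Sun")
    #   obsmap.data['xdec_offset'], obsmap.data['dec_offset']   # offsets in degrees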
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
If the data being curated are not in a standard project, and they are not
    in a standard place, the file path and name must be given explicitly.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
session (Session): required, unless:
path (str) : location of raw data files
date
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
doy (int) - day of year for session
logger (logging.Logger) - logging.Logger object
parent (object) - a data reduction session (mult. observ. sessions)
        year (int) - year for session
        project (str) - name of the project
session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
dss (int) required
path (str) optional
If `path` is given for a non-standard observing files location, and it does
not exist, it will be created. Then the Recording and Observation instances
must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
            self.year = int(y)
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
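    # Example of creating a session (project name and station are hypothetical):
    #   ses = Session(date="2020/127", project="SolarPatrol", dss=28)
    # which finds or creates .../SolarPatrol/Observations/dss28/2020/127/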
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
        Provide the user with a menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
        @param auto : take all files found
@type auto : bool
@return: list of str
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
if extent.isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
2**N channels so that num_chan/num_chan_in is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
@param spectrum : spectrum values
@type spectrum : list or nparray
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
"""
if math.log(num_chan,2) % 1:
raise RuntimeError("num_chan = %d is not a power of 2", num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
raise RuntimeError("input spectrum length = %d is not a power of 2",
num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
        num_chan_avg = num_chans_in//num_chan  # integer: both are powers of 2
        newrefpix = refpix//num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
        specout = NP.array([self.spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
                            for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
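    # Worked example (hypothetical numbers): reducing 32768 input channels to the
    # default num_chan=1024 gives num_chan_avg=32, so the reference pixel becomes
    # refpix//32, the channel spacing becomes 32*delta, and refval shifts by
    # delta*(32/2 - 1) to the center of the first averaged bin.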
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
    @staticmethod
    def freq_to_chan(frequency, bandwidth, n_chans):
"""
Returns the channel number where a given frequency is to be found.
@param frequency : frequency of channel in sane units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
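    # Example: with a 1 MHz bandwidth and 1024 channels,
    #   Spectrum.freq_to_chan(0.5, 1.0, 1024)
    # returns channel 512; a negative frequency is first wrapped by adding the bandwidth.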
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
@param degree : number of samples to smoothed (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
        if degree is None:
            degree = len(self.spectrum)//100
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
        # least-squares fit of an order-``poly_order`` polynomial to the smoothed,
        # normalized spectrum, with channel number as the abscissa (see the
        # numpy.polyfit note in the docstring)
        poly = NP.polyfit(range(len(norm_spec_db_smoothed_resized)),
                    norm_spec_db_smoothed_resized, poly_order)
        return poly, norm_spec_db_smoothed_resized
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
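# Example (the file name is hypothetical):
#   examine_text_data_file("t12127.10")
# prints the first two and last data rows under a column ruler, then the dtype of
# every column that genfromtxt() detects.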
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
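# Example (project and station are hypothetical):
#   get_obs_dirs('SolarPatrol', 28, 2020, 127, datafmt="HDF5")
# returns ('/usr/local/project_data/SolarPatrol/dss28/2020/127/',
#          '/usr/local/projects/SolarPatrol/Observations/dss28/2020/127/',
#          '/usr/local/RA_data/HDF5/dss28/2020/127/')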
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
| [
"logging.getLogger",
"Astronomy.apparent_to_J2000",
"datetime.datetime.utcfromtimestamp",
"numpy.log10",
"math.log",
"math.cos",
"numpy.array",
"Math.clusters.find_clusters",
"Astronomy.J2000_to_apparent",
"numpy.genfromtxt",
"numpy.arange",
"re.search",
"os.path.exists",
"readline.parse_and_bind",
"support.text.select_files",
"numpy.max",
"os.path.isdir",
"Astronomy.Ephem.calibrator",
"DatesTimes.UnixTime_to_MJD",
"numpy.meshgrid",
"h5py.File.__init__",
"glob.glob",
"matplotlib.dates.date2num",
"os.path.splitext",
"Astronomy.RaDec_to_AzEl",
"support.PropertiedClass.__init__",
"Astronomy.Ephem.ephem.Sun",
"DatesTimes.day_of_year",
"Astronomy.AzEl_to_RaDec",
"os.makedirs",
"os.getcwd",
"os.chdir",
"Astronomy.DSN_coordinates.DSS",
"os.path.basename"
]
| [((2597, 2637), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (2620, 2637), False, 'import readline\n'), ((2648, 2675), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2665, 2675), False, 'import logging\n'), ((43188, 43265), 'numpy.genfromtxt', 'NP.genfromtxt', (['filename'], {'dtype': 'None', 'names': 'None', 'skip_header': '(1)', 'encoding': 'None'}), '(filename, dtype=None, names=None, skip_header=1, encoding=None)\n', (43201, 43265), True, 'import numpy as NP\n'), ((45758, 45769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45767, 45769), False, 'import os\n'), ((46816, 46829), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (46824, 46829), False, 'import os\n'), ((7573, 7620), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (7590, 7620), False, 'import logging\n'), ((8097, 8144), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (8114, 8144), False, 'import logging\n'), ((14217, 14378), 'numpy.genfromtxt', 'NP.genfromtxt', (['(self.sessionpath + filename)'], {'delimiter': 'delimiter', 'dtype': 'None', 'names': 'names', 'case_sensitive': '"""lower"""', 'skip_header': 'skip_header', 'encoding': 'None'}), "(self.sessionpath + filename, delimiter=delimiter, dtype=None,\n names=names, case_sensitive='lower', skip_header=skip_header, encoding=None\n )\n", (14230, 14378), True, 'import numpy as NP\n'), ((22713, 22778), 'numpy.arange', 'NP.arange', (['(-width / 2)', '(width / 2 + self.xstep / 2)', '(self.xstep / 2)'], {}), '(-width / 2, width / 2 + self.xstep / 2, self.xstep / 2)\n', (22722, 22778), True, 'import numpy as NP\n'), ((22830, 22897), 'numpy.arange', 'NP.arange', (['(-height / 2)', '(height / 2 + self.ystep / 2)', '(self.ystep / 2)'], {}), '(-height / 2, height / 2 + self.ystep / 2, self.ystep / 2)\n', (22839, 22897), True, 'import numpy as NP\n'), ((28963, 28997), 'numpy.array', 'NP.array', (["self.data['xdec_offset']"], {}), "(self.data['xdec_offset'])\n", (28971, 28997), True, 'import numpy as NP\n'), ((29028, 29061), 'numpy.array', 'NP.array', (["self.data['dec_offset']"], {}), "(self.data['dec_offset'])\n", (29036, 29061), True, 'import numpy as NP\n'), ((30517, 30562), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Recording')"], {}), "(logger.name + '.Recording')\n", (30534, 30562), False, 'import logging\n'), ((31174, 31209), 'h5py.File.__init__', 'h5py.File.__init__', (['self', 'name', '"""w"""'], {}), "(self, name, 'w')\n", (31192, 31209), False, 'import h5py\n'), ((32576, 32619), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Session')"], {}), "(logger.name + '.Session')\n", (32593, 32619), False, 'import logging\n'), ((36501, 36545), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Spectrum')"], {}), "(logger.name + '.Spectrum')\n", (36518, 36545), False, 'import logging\n'), ((41525, 41546), 'numpy.max', 'NP.max', (['self.spectrum'], {}), '(self.spectrum)\n', (41531, 41546), True, 'import numpy as NP\n'), ((45289, 45304), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (45298, 45304), False, 'import glob\n'), ((45825, 45858), 'os.chdir', 'os.chdir', (['local_dirs.projects_dir'], {}), '(local_dirs.projects_dir)\n', (45833, 45858), False, 'import os\n'), ((46154, 46194), 'os.chdir', 'os.chdir', (["(projectpath + '/Observations/')"], {}), "(projectpath + '/Observations/')\n", (46162, 
46194), False, 'import os\n'), ((7710, 7725), 'Astronomy.DSN_coordinates.DSS', 'coords.DSS', (['dss'], {}), '(dss)\n', (7720, 7725), True, 'import Astronomy.DSN_coordinates as coords\n'), ((11986, 12024), 'support.PropertiedClass.__init__', 'support.PropertiedClass.__init__', (['self'], {}), '(self)\n', (12018, 12024), False, 'import support\n'), ((12072, 12120), 'logging.getLogger', 'logging.getLogger', (["(self.parent.name + '.Channel')"], {}), "(self.parent.name + '.Channel')\n", (12089, 12120), False, 'import logging\n'), ((23444, 23497), 'numpy.meshgrid', 'NP.meshgrid', (["self.data['grid_x']", "self.data['grid_y']"], {}), "(self.data['grid_x'], self.data['grid_y'])\n", (23455, 23497), True, 'import numpy as NP\n'), ((24731, 24810), 'Astronomy.AzEl_to_RaDec', 'A.AzEl_to_RaDec', (['azimuth', 'elevation', 'self.latitude', '(-self.longitude)', 'time_tuple'], {}), '(azimuth, elevation, self.latitude, -self.longitude, time_tuple)\n', (24746, 24810), True, 'import Astronomy as A\n'), ((25326, 25348), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (25344, 25348), True, 'import DatesTimes as DT\n'), ((25489, 25557), 'Astronomy.apparent_to_J2000', 'A.apparent_to_J2000', (['MJD', 'UT', 'ra', 'dec', 'self.longitude', 'self.latitude'], {}), '(MJD, UT, ra, dec, self.longitude, self.latitude)\n', (25508, 25557), True, 'import Astronomy as A\n'), ((26099, 26121), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (26117, 26121), True, 'import DatesTimes as DT\n'), ((26281, 26357), 'Astronomy.J2000_to_apparent', 'A.J2000_to_apparent', (['MJD', 'UT', '(ra2000 * math.pi / 12)', '(dec2000 * math.pi / 180)'], {}), '(MJD, UT, ra2000 * math.pi / 12, dec2000 * math.pi / 180)\n', (26300, 26357), True, 'import Astronomy as A\n'), ((27075, 27143), 'Astronomy.RaDec_to_AzEl', 'A.RaDec_to_AzEl', (['ra', 'dec', 'self.latitude', 'self.longitude', '(year, doy)'], {}), '(ra, dec, self.latitude, self.longitude, (year, doy))\n', (27090, 27143), True, 'import Astronomy as A\n'), ((28094, 28108), 'Astronomy.Ephem.ephem.Sun', 'AE.ephem.Sun', ([], {}), '()\n', (28106, 28108), True, 'import Astronomy.Ephem as AE\n'), ((28131, 28152), 'Astronomy.Ephem.calibrator', 'AE.calibrator', (['source'], {}), '(source)\n', (28144, 28152), True, 'import Astronomy.Ephem as AE\n'), ((28283, 28347), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (["self.data['unixtime'][count]"], {}), "(self.data['unixtime'][count])\n", (28317, 28347), False, 'import datetime\n'), ((33606, 33638), 'os.path.exists', 'os.path.exists', (['self.session_dir'], {}), '(self.session_dir)\n', (33620, 33638), False, 'import os\n'), ((33646, 33685), 'os.makedirs', 'os.makedirs', (['self.session_dir'], {'mode': '(509)'}), '(self.session_dir, mode=509)\n', (33657, 33685), False, 'import os\n'), ((34858, 34888), 'os.path.splitext', 'os.path.splitext', (['name_pattern'], {}), '(name_pattern)\n', (34874, 34888), False, 'import os\n'), ((35293, 35320), 're.search', 're.search', (['"""HDF5"""', 'datapath'], {}), "('HDF5', datapath)\n", (35302, 35320), False, 'import re\n'), ((35874, 35889), 'glob.glob', 'glob.glob', (['full'], {}), '(full)\n', (35883, 35889), False, 'import glob\n'), ((35918, 35949), 'support.text.select_files', 'support.text.select_files', (['full'], {}), '(full)\n', (35943, 35949), False, 'import support\n'), ((38060, 38081), 'math.log', 'math.log', (['num_chan', '(2)'], {}), '(num_chan, 2)\n', (38068, 38081), False, 'import math\n'), ((38304, 38329), 'math.log', 
'math.log', (['num_chans_in', '(2)'], {}), '(num_chans_in, 2)\n', (38312, 38329), False, 'import math\n'), ((41563, 41586), 'numpy.array', 'NP.array', (['self.spectrum'], {}), '(self.spectrum)\n', (41571, 41586), True, 'import numpy as NP\n'), ((41624, 41643), 'numpy.log10', 'NP.log10', (['norm_spec'], {}), '(norm_spec)\n', (41632, 41643), True, 'import numpy as NP\n'), ((46232, 46261), 'os.chdir', 'os.chdir', (['local_dirs.hdf5_dir'], {}), '(local_dirs.hdf5_dir)\n', (46240, 46261), False, 'import os\n'), ((46550, 46561), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46559, 46561), False, 'import os\n'), ((17776, 17814), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['tm'], {}), '(tm)\n', (17810, 17814), False, 'import datetime\n'), ((21262, 21286), 'Math.clusters.find_clusters', 'VQ.find_clusters', (['coords'], {}), '(coords)\n', (21278, 21286), True, 'import Math.clusters as VQ\n'), ((35357, 35392), 're.search', 're.search', (['"""project_data"""', 'datapath'], {}), "('project_data', datapath)\n", (35366, 35392), False, 'import re\n'), ((39777, 39795), 'numpy.arange', 'NP.arange', (['n_chans'], {}), '(n_chans)\n', (39786, 39795), True, 'import numpy as NP\n'), ((45371, 45390), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (45384, 45390), False, 'import os\n'), ((46301, 46330), 'os.chdir', 'os.chdir', (['local_dirs.fits_dir'], {}), '(local_dirs.fits_dir)\n', (46309, 46330), False, 'import os\n'), ((17893, 17910), 'matplotlib.dates.date2num', 'MPLd.date2num', (['dt'], {}), '(dt)\n', (17906, 17910), True, 'import matplotlib.dates as MPLd\n'), ((24407, 24448), 'DatesTimes.day_of_year', 'DT.day_of_year', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (24421, 24448), True, 'import DatesTimes as DT\n'), ((35439, 35503), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '[0-9].pkl')"], {}), "(datapath + name_pattern + '[0-9].pkl')\n", (35464, 35503), False, 'import support\n'), ((35511, 35538), 're.search', 're.search', (['"""FITS"""', 'datapath'], {}), "('FITS', datapath)\n", (35520, 35538), False, 'import re\n'), ((36953, 36982), 'math.log', 'math.log', (['est_num_chan_out', '(2)'], {}), '(est_num_chan_out, 2)\n', (36961, 36982), False, 'import math\n'), ((28771, 28787), 'math.cos', 'math.cos', (['decrad'], {}), '(decrad)\n', (28779, 28787), False, 'import math\n'), ((35560, 35620), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '.fits')"], {}), "(datapath + name_pattern + '.fits')\n", (35585, 35620), False, 'import support\n'), ((45414, 45436), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (45430, 45436), False, 'import os\n'), ((46477, 46488), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46486, 46488), False, 'import os\n')] |
import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
            # evaluate performance every epoch
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
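# Example: iterating a toy list in mini-batches of size 2:
#   list(next_batch(2, [1, 2, 3, 4, 5]))  ->  [[1, 2], [3, 4], [5]]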
# -------------------------------------------------------
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| [
"numpy.random.normal",
"random.shuffle",
"CGRE_Model.CGRE",
"dataset.DataSet",
"wordreps.WordReps",
"numpy.hstack",
"tensorflow.Session",
"Eval.eval_SemEval",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"numpy.save"
]
| [((7593, 7603), 'wordreps.WordReps', 'WordReps', ([], {}), '()\n', (7601, 7603), False, 'from wordreps import WordReps\n'), ((7714, 7730), 'numpy.zeros', 'np.zeros', (['WR.dim'], {}), '(WR.dim)\n', (7722, 7730), True, 'import numpy as np\n'), ((8127, 8199), 'dataset.DataSet', 'DataSet', (['corpus', 'Train_dataset', 'Test_dataset', 'labels_type', 'Reverse_pairs'], {}), '(corpus, Train_dataset, Test_dataset, labels_type, Reverse_pairs)\n', (8134, 8199), False, 'from dataset import DataSet\n'), ((828, 866), 'CGRE_Model.CGRE', 'CGRE_Model.CGRE', (['activ', 'self.batchSize'], {}), '(activ, self.batchSize)\n', (843, 866), False, 'import CGRE_Model\n'), ((2289, 2301), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2299, 2301), True, 'import tensorflow as tf\n'), ((4539, 4594), 'numpy.save', 'np.save', (['"""res/Pair_Embeddings.npy"""', 'Pair_Embeddings_dic'], {}), "('res/Pair_Embeddings.npy', Pair_Embeddings_dic)\n", (4546, 4594), True, 'import numpy as np\n'), ((5540, 5587), 'numpy.hstack', 'np.hstack', (['(Pair_Embeddings1, Pair_Embeddings2)'], {}), '((Pair_Embeddings1, Pair_Embeddings2))\n', (5549, 5587), True, 'import numpy as np\n'), ((2318, 2351), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2349, 2351), True, 'import tensorflow as tf\n'), ((2529, 2555), 'random.shuffle', 'random.shuffle', (['Train_list'], {}), '(Train_list)\n', (2543, 2555), False, 'import random\n'), ((7827, 7856), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7843, 7856), True, 'import numpy as np\n'), ((7892, 7921), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7908, 7921), True, 'import numpy as np\n'), ((2671, 2708), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Test"""'], {}), "(Pair_Embeddings, 'Test')\n", (2683, 2708), False, 'from Eval import eval_SemEval\n'), ((2725, 2763), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Valid"""'], {}), "(Pair_Embeddings, 'Valid')\n", (2737, 2763), False, 'from Eval import eval_SemEval\n'), ((2780, 2816), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""All"""'], {}), "(Pair_Embeddings, 'All')\n", (2792, 2816), False, 'from Eval import eval_SemEval\n')] |
from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
raise MultipleTrafficLightsError(
"No address is given and there are multiple devices conected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
def detach(self):
"""Detaches the device from to kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
        Args:
            color -- the color to set, as the enum. E.g. Color.RED
            value -- the state to which it should be set. E.g. State.ON
            timeout -- timeout for the USB control transfer in milliseconds
"""
try:
self.detach()
self.device.ctrl_transfer(BM_REQUEST_TYPE, B_REQUEST, W_VALUE, W_INDEX, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
def __getattr__(self, name):
"""Parses attribut calls in function"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
def __str__(self):
"""Converts instance into string with important imformations"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| [
"traffic_light.error.TrafficLightError",
"traffic_light.error.MultipleTrafficLightsError",
"functools.partial"
]
| [((3429, 3474), 'functools.partial', 'functools.partial', (['self.set_led', 'color', 'state'], {}), '(self.set_led, color, state)\n', (3446, 3474), False, 'import functools\n'), ((1131, 1184), 'traffic_light.error.TrafficLightError', 'TrafficLightError', (['"""Cleware traffic light not found!"""'], {}), "('Cleware traffic light not found!')\n", (1148, 1184), False, 'from traffic_light.error import TrafficLightError, MultipleTrafficLightsError\n'), ((756, 912), 'traffic_light.error.MultipleTrafficLightsError', 'MultipleTrafficLightsError', (['"""No address is given and there are multiple devices conected! Use \'print_devices\' to see a list of connected devices."""'], {}), '(\n "No address is given and there are multiple devices conected! Use \'print_devices\' to see a list of connected devices."\n )\n', (782, 912), False, 'from traffic_light.error import TrafficLightError, MultipleTrafficLightsError\n')] |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
This sample demonstrates how to analyze user query using an orchestration project.
In this sample, orchestration project's top intent will map to a LUIS project.
For more info about how to setup a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
async def sample_analyze_orchestration_app_luis_response_async():
# [START analyze_orchestration_app_luis_response]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
# get secrets
clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
# analyze query
client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
async with client:
query = "Reserve a table for 2 at the Italian restaurant"
result = await client.analyze_conversation(
task={
"kind": "Conversation",
"analysisInput": {
"conversationItem": {
"participantId": "1",
"id": "1",
"modality": "text",
"language": "en",
"text": query
},
"isLoggingEnabled": False
},
"parameters": {
"projectName": project_name,
"deploymentName": deployment_name,
"verbose": True
}
}
)
# view result
print("query: {}".format(result["result"]["query"]))
print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
# top intent
top_intent = result["result"]["prediction"]["topIntent"]
print("top intent: {}".format(top_intent))
top_intent_object = result["result"]["prediction"]["intents"][top_intent]
print("confidence score: {}".format(top_intent_object["confidenceScore"]))
print("project kind: {}".format(top_intent_object["targetProjectKind"]))
if top_intent_object["targetProjectKind"] == "Luis":
print("\nluis response:")
luis_response = top_intent_object["result"]["prediction"]
print("top intent: {}".format(luis_response["topIntent"]))
print("\nentities:")
for entity in luis_response["entities"]:
print("\n{}".format(entity))
# [END analyze_orchestration_app_luis_response]
async def main():
await sample_analyze_orchestration_app_luis_response_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | [
"asyncio.get_event_loop",
"azure.core.credentials.AzureKeyCredential"
]
| [((3586, 3610), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3608, 3610), False, 'import asyncio\n'), ((1731, 1758), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['clu_key'], {}), '(clu_key)\n', (1749, 1758), False, 'from azure.core.credentials import AzureKeyCredential\n')] |
import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
class Styles(object):
"""
Loads and holds cache of scheme styles
Also generates style header
"""
def __init__(self):
self.schemes = {}
CSS_CLASSES = {
'comment': 'comment',
'function': 'entity.name.function',
'type': 'entity.name.type',
'operator': 'keyword.operator',
'keyword': 'keyword.declaration',
'tyvar': 'variable.generic',
'error': 'sublimehaskell.mark.error',
'warning': 'sublimehaskell.mark.warning',
'hint': 'sublimehaskell.mark.hint'
}
def load_scheme(self, scheme_path):
if scheme_path not in self.schemes:
scheme_res = sublime.load_resource(scheme_path)
if scheme_res:
# Go through all styles and collect scope/foreground/fontStyle etc.
# Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML.
self.schemes[scheme_path] = self.collect_sublime_scheme(json.loads(scheme_res)) \
if scheme_path.endswith('.sublime-color-scheme') \
else self.collect_textmate_scheme(ElementTree.fromstring(scheme_res))
return self.schemes.get(scheme_path, {})
def collect_textmate_scheme(self, scheme_tree):
scheme = {}
for style in scheme_tree.findall(".//dict[key='scope']"):
try:
cur_style = {}
cur_tag = None
for elem in style.iter():
if elem.tag == 'key':
cur_tag = elem.text # We are going to fill it next time
elif elem.tag == 'string' and cur_tag is not None:
cur_style[cur_tag] = elem.text
cur_tag = None
if 'scope' in cur_style:
scheme[cur_style['scope']] = cur_style
except ValueError:
pass
return scheme
def collect_sublime_scheme(self, scheme_dict):
scheme = {}
for rule in scheme_dict.get('rules', []):
scope = rule.get('scope', '')
if scope:
scheme[scope] = rule
return scheme
def gen_style(self, scheme_path):
scheme = self.load_scheme(scheme_path)
parts = []
parts.append("<style>")
parts.append("a { text-decoration: underline; }")
# generate CSS style for each class
for cls, scope in self.CSS_CLASSES.items():
# find scope or its parent in scheme
scope_parts = scope.split('.')
for css_scope in reversed(['.'.join(scope_parts[0:i+1]) for i in range(0, len(scope_parts))]):
if css_scope in scheme: # Found some scope, fill style class
style_parts = []
if 'foreground' in scheme[css_scope]:
style_parts.append("color: {0}".format(scheme[css_scope]['foreground']))
# Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name
font_style = scheme[css_scope].get('font_style', scheme[css_scope].get('fontStyle', ''))
if font_style:
style_parts.append("font-style: {0}".format(font_style))
parts.append(".{0} {{ {1} }}".format(cls, "; ".join(style_parts)))
break
parts.append("</style>")
return "".join(parts)
class SublimeHaskellHoverPopup(object):
# HTML style formatting
STYLES = Styles()
def __init__(self, view, filename, point, hover_zone):
super().__init__()
self.view = view
self.filename = filename
self.point = point
self.hover_zone = hover_zone
self.line = view.rowcol(point)[0]
self.shown = False
def do_hover(self):
if self.hover_zone == sublime.HOVER_TEXT:
qsymbol = Common.get_qualified_symbol_at_point(self.view, self.point)
## print('hover: qualified symbol {0}'.format(qsymbol))
module_word = qsymbol.module
ident = qsymbol.name
if module_word is not None and ident is None:
# TODO: Any ideas for popup about module?
pass
elif ident is not None:
whois_name = qsymbol.qualified_name()
full_name = qsymbol.full_name()
# Try get type of hovered symbol
typed_expr = None
if types.SourceHaskellTypeCache().has(self.filename):
typed_expr = self.get_type(types.SourceHaskellTypeCache().get(self.filename), whois_name)
else:
project_name = Common.locate_cabal_project_from_view(self.view)[1]
point_rgn = sublime.Region(self.point, self.point)
typed_expr = self.get_type(types.get_type_view(self.view, project_name, point_rgn), whois_name)
# Try whois
suggest_import = False
decl = Utils.head_of(BackendManager.active_backend().whois(whois_name, self.filename))
if not decl:
suggest_import = True
decl = Utils.head_of(BackendManager.active_backend().lookup(full_name, self.filename))
self.create_symbol_popup(typed_expr, decl, suggest_import)
elif self.hover_zone == sublime.HOVER_GUTTER:
errs = [err for err in ParseOutput.MARKER_MANAGER.marks_for_view(self.view) if err.region.start.line == self.line]
if errs:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
for err in errs:
msg = UnicodeOpers.use_unicode_operators(symbols.escape_text(err.message))
# Decorate first word with style
decors = {
'Error': 'error',
'Warning': 'warning',
'Hint': 'hint'
}
for dec, dec_style in decors.items():
msg = msg.replace(dec, u'<span class="{0}">{1}</span>'.format(dec_style, dec))
popup_parts.append(u'<p>{0}</p>'.format(msg))
if err.correction is not None:
popup_parts.append(err.correction.popup())
popup_text = u''.join(popup_parts)
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
def create_symbol_popup(self, typed_expr, decl, suggest_import):
if typed_expr or decl:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
if typed_expr:
popup_parts.append(u'<p><span class="function">{0}</span>{1}</p>'.format(
typed_expr.substr(self.view),
symbols.format_type(UnicodeOpers.use_unicode_operators(' :: {0}'.format(typed_expr.typename)))))
if decl:
popup_msg = [u'<a href="import:{0}">Add import</a>'.format(urllib.parse.quote_plus(decl.name))] \
if suggest_import else []
popup_parts.append(decl.popup(popup_msg))
popup_text = u''.join(popup_parts)
if not self.shown:
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
else:
self.view.update_popup(popup_text)
def get_type(self, type_list, qual_name):
filt_types = [t for t in type_list
if t.substr(self.view) == qual_name and t.region(self.view).contains(self.point)]
return Utils.head_of(filt_types)
def on_navigate(self, url):
if self.view.is_popup_visible():
self.view.hide_popup()
if url[0:4] == 'http':
webbrowser.open(url)
elif url[0:8] == 'autofix:':
rgn = symbols.Region.from_str(url[8:])
ParseOutput.MARKER_MANAGER.apply_autocorrect(self.view, rgn)
elif url[0:7] == "import:":
decl_name = urllib.parse.unquote(url[7:])
self.view.run_command('sublime_haskell_insert_import_for_symbol',
{'filename': self.view.file_name(),
'decl': decl_name})
else:
self.view.window().open_file(url, sublime.ENCODED_POSITION | sublime.TRANSIENT)
def on_hide(self):
self.shown = False
| [
"json.loads",
"SublimeHaskell.symbols.Region.from_str",
"webbrowser.open",
"SublimeHaskell.parseoutput.MARKER_MANAGER.apply_autocorrect",
"sublime.Region",
"SublimeHaskell.types.get_type_view",
"SublimeHaskell.parseoutput.MARKER_MANAGER.marks_for_view",
"SublimeHaskell.symbols.escape_text",
"sublime.load_resource",
"SublimeHaskell.types.SourceHaskellTypeCache",
"SublimeHaskell.sublime_haskell_common.get_qualified_symbol_at_point",
"SublimeHaskell.internals.utils.head_of",
"xml.etree.ElementTree.fromstring",
"SublimeHaskell.internals.backend_mgr.active_backend",
"SublimeHaskell.sublime_haskell_common.locate_cabal_project_from_view"
]
| [((8691, 8716), 'SublimeHaskell.internals.utils.head_of', 'Utils.head_of', (['filt_types'], {}), '(filt_types)\n', (8704, 8716), True, 'import SublimeHaskell.internals.utils as Utils\n'), ((1436, 1470), 'sublime.load_resource', 'sublime.load_resource', (['scheme_path'], {}), '(scheme_path)\n', (1457, 1470), False, 'import sublime\n'), ((4675, 4734), 'SublimeHaskell.sublime_haskell_common.get_qualified_symbol_at_point', 'Common.get_qualified_symbol_at_point', (['self.view', 'self.point'], {}), '(self.view, self.point)\n', (4711, 4734), True, 'import SublimeHaskell.sublime_haskell_common as Common\n'), ((8878, 8898), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (8893, 8898), False, 'import webbrowser\n'), ((8962, 8994), 'SublimeHaskell.symbols.Region.from_str', 'symbols.Region.from_str', (['url[8:]'], {}), '(url[8:])\n', (8985, 8994), True, 'import SublimeHaskell.symbols as symbols\n'), ((9011, 9071), 'SublimeHaskell.parseoutput.MARKER_MANAGER.apply_autocorrect', 'ParseOutput.MARKER_MANAGER.apply_autocorrect', (['self.view', 'rgn'], {}), '(self.view, rgn)\n', (9055, 9071), True, 'import SublimeHaskell.parseoutput as ParseOutput\n'), ((1736, 1758), 'json.loads', 'json.loads', (['scheme_res'], {}), '(scheme_res)\n', (1746, 1758), False, 'import json\n'), ((1887, 1921), 'xml.etree.ElementTree.fromstring', 'ElementTree.fromstring', (['scheme_res'], {}), '(scheme_res)\n', (1909, 1921), False, 'from xml.etree import ElementTree\n'), ((5558, 5596), 'sublime.Region', 'sublime.Region', (['self.point', 'self.point'], {}), '(self.point, self.point)\n', (5572, 5596), False, 'import sublime\n'), ((6228, 6280), 'SublimeHaskell.parseoutput.MARKER_MANAGER.marks_for_view', 'ParseOutput.MARKER_MANAGER.marks_for_view', (['self.view'], {}), '(self.view)\n', (6269, 6280), True, 'import SublimeHaskell.parseoutput as ParseOutput\n'), ((5256, 5286), 'SublimeHaskell.types.SourceHaskellTypeCache', 'types.SourceHaskellTypeCache', ([], {}), '()\n', (5284, 5286), True, 'import SublimeHaskell.types as types\n'), ((5474, 5522), 'SublimeHaskell.sublime_haskell_common.locate_cabal_project_from_view', 'Common.locate_cabal_project_from_view', (['self.view'], {}), '(self.view)\n', (5511, 5522), True, 'import SublimeHaskell.sublime_haskell_common as Common\n'), ((5644, 5699), 'SublimeHaskell.types.get_type_view', 'types.get_type_view', (['self.view', 'project_name', 'point_rgn'], {}), '(self.view, project_name, point_rgn)\n', (5663, 5699), True, 'import SublimeHaskell.types as types\n'), ((6531, 6563), 'SublimeHaskell.symbols.escape_text', 'symbols.escape_text', (['err.message'], {}), '(err.message)\n', (6550, 6563), True, 'import SublimeHaskell.symbols as symbols\n'), ((5818, 5849), 'SublimeHaskell.internals.backend_mgr.active_backend', 'BackendManager.active_backend', ([], {}), '()\n', (5847, 5849), True, 'import SublimeHaskell.internals.backend_mgr as BackendManager\n'), ((5354, 5384), 'SublimeHaskell.types.SourceHaskellTypeCache', 'types.SourceHaskellTypeCache', ([], {}), '()\n', (5382, 5384), True, 'import SublimeHaskell.types as types\n'), ((5996, 6027), 'SublimeHaskell.internals.backend_mgr.active_backend', 'BackendManager.active_backend', ([], {}), '()\n', (6025, 6027), True, 'import SublimeHaskell.internals.backend_mgr as BackendManager\n')] |
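# Illustrative sketch, not part of the plugin above: the core of collect_sublime_scheme()
# plus gen_style() reduced to a standalone function. The example rules and CSS-class
# mapping are made up; parent-scope fallback and font styles are omitted here.
def rules_to_css(scheme_rules, css_classes):
    scheme = {rule['scope']: rule for rule in scheme_rules if rule.get('scope')}
    parts = ['<style>', 'a { text-decoration: underline; }']
    for cls, scope in css_classes.items():
        rule = scheme.get(scope, {})
        if 'foreground' in rule:
            parts.append('.{0} {{ color: {1} }}'.format(cls, rule['foreground']))
    parts.append('</style>')
    return ''.join(parts)
# Example call:
# rules_to_css([{'scope': 'comment', 'foreground': '#75715E'}], {'comment': 'comment'})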
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
Squares (HALS) Method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
    This implementation uses the Hierarchical Alternating Least Squares method.
References
----------
Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
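    A minimal illustrative sketch (assumes NumPy is available and that this
    function is importable from the installed package); shapes and rank are
    arbitrary:

    >>> import numpy as np
    >>> X = np.random.rand(20, 20, 20)          # nonnegative data tensor
    >>> mask = np.random.rand(20, 20, 20) > 0.5  # True where entries are observed
    >>> result = mncp_hals(X, rank=3, mask=mask)
    >>> X_hat = result.factors.full()            # low-rank reconstruction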
"""
    # Fill missing elements with a placeholder value before fitting.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
    # Norm of the observed entries, used to normalize the residual below.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
    return result.finalize()
| [
"tensortools.optimize.optim_utils._check_cpd_inputs",
"numpy.copy",
"tensortools.operations.khatri_rao",
"tensortools.operations.unfold",
"tensortools.optimize.FitResult",
"tensortools.optimize.optim_utils._get_initial_ktensor",
"numpy.linalg.norm"
]
| [((3073, 3083), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (3080, 3083), True, 'import numpy as np\n'), ((3099, 3122), 'numpy.linalg.norm', 'np.linalg.norm', (['X[mask]'], {}), '(X[mask])\n', (3113, 3122), True, 'import numpy as np\n'), ((3148, 3186), 'tensortools.optimize.optim_utils._check_cpd_inputs', 'optim_utils._check_cpd_inputs', (['X', 'rank'], {}), '(X, rank)\n', (3177, 3186), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3229, 3290), 'tensortools.optimize.optim_utils._get_initial_ktensor', 'optim_utils._get_initial_ktensor', (['init', 'X', 'rank', 'random_state'], {}), '(init, X, rank, random_state)\n', (3261, 3290), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3304, 3339), 'tensortools.optimize.FitResult', 'FitResult', (['U', '"""NCP_HALS"""'], {}), "(U, 'NCP_HALS', **options)\n", (3313, 3339), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((4188, 4210), 'tensortools.operations.khatri_rao', 'khatri_rao', (['components'], {}), '(components)\n', (4198, 4210), False, 'from tensortools.operations import unfold, khatri_rao\n'), ((4227, 4239), 'tensortools.operations.unfold', 'unfold', (['X', 'n'], {}), '(X, n)\n', (4233, 4239), False, 'from tensortools.operations import unfold, khatri_rao\n')] |
"""
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
        Percentage of the signal that should survive; the cut boundaries are chosen accordingly.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet imlemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
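# Minimal usage sketch for apply_cuts (not part of the original module); the toy
# signal/background arrays are made up and the call is wrapped in a function so
# nothing is executed at import time.
def _example_apply_cuts():
    signal = np.random.normal(loc=0.0, scale=1.0, size=1000)
    bkg = np.random.normal(loc=1.0, scale=2.0, size=1000)
    (lower, upper), rejected = apply_cuts(signal, bkg, percent_sig_to_keep=95)
    return lower, upper, rejected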
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
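# Sketch of how a root-dict produced by make_root_dict() feeds the converters defined
# further below (see to_pandas/to_ndarray); the file, tree and branch names are
# hypothetical. Wrapped in a function so nothing runs at import time.
def _example_root_dict_to_pandas():
    root_dict = make_root_dict("/path/to/some_file.root", "DecayTree", ["B_M", "B_PT"])
    return to_pandas(root_dict)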
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
        The name of the branch, i.e. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what does that if there? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
        If weights contains fewer "weight-containing array-like objects" than
        data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
    # convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterate through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves the list but splits the strings
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want to.
Solution: use to_list(obj), which creates a new list in case the object is
    a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
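    Examples
    --------
    Behaviour illustrated on a few simple inputs (follows directly from the
    implementation below):

    >>> to_list("a_single_string")
    ['a_single_string']
    >>> to_list(("a", "b"))
    ['a', 'b']
    >>> to_list(42)
    [42]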
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
root_index = None
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
raise TypeError("Could not convert data to pandas. Data: " + data_in)
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
| [
"root_numpy.array2root",
"copy.deepcopy",
"pickle.dump",
"pickle.load",
"numpy.logical_or",
"os.path.isfile",
"numpy.stack",
"numpy.array",
"numpy.asfarray",
"root_numpy.root2array",
"numpy.core.records.fromarrays",
"uproot.open",
"rootpy.io.root_open",
"pandas.DataFrame",
"numpy.percentile",
"warnings.warn"
]
| [((1340, 1378), 'numpy.percentile', 'np.percentile', (['signal_data', 'percentile'], {}), '(signal_data, percentile)\n', (1353, 1378), True, 'import numpy as np\n'), ((3382, 3406), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3396, 3406), False, 'import os\n'), ((1419, 1476), 'numpy.logical_or', 'np.logical_or', (['(bkg_data < lower_cut)', '(bkg_data > upper_cut)'], {}), '(bkg_data < lower_cut, bkg_data > upper_cut)\n', (1432, 1476), True, 'import numpy as np\n'), ((3841, 3900), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['[new_branch]'], {'names': 'branch_name'}), '([new_branch], names=branch_name)\n', (3867, 3900), True, 'import numpy as np\n'), ((3909, 3983), 'root_numpy.array2root', 'array2root', ([], {'arr': 'arr', 'filename': 'filename', 'treename': 'treename', 'mode': 'write_mode'}), '(arr=arr, filename=filename, treename=treename, mode=write_mode)\n', (3919, 3983), False, 'from root_numpy import array2root\n'), ((9929, 9978), 'numpy.stack', 'np.stack', (['[loaded[branch] for branch in branches]'], {}), '([loaded[branch] for branch in branches])\n', (9937, 9978), True, 'import numpy as np\n'), ((10302, 10339), 'numpy.array', 'np.array', (['[val[0] for val in data_in]'], {}), '([val[0] for val in data_in])\n', (10310, 10339), True, 'import numpy as np\n'), ((10507, 10524), 'numpy.array', 'np.array', (['data_in'], {}), '(data_in)\n', (10515, 10524), True, 'import numpy as np\n'), ((11449, 11469), 'numpy.asfarray', 'np.asfarray', (['data_in'], {}), '(data_in)\n', (11460, 11469), True, 'import numpy as np\n'), ((12447, 12479), 'root_numpy.root2array', 'root_numpy.root2array', ([], {}), '(**data_in)\n', (12468, 12479), False, 'import root_numpy\n'), ((12552, 12569), 'numpy.array', 'np.array', (['data_in'], {}), '(data_in)\n', (12560, 12569), True, 'import numpy as np\n'), ((12779, 12835), 'pandas.DataFrame', 'pd.DataFrame', (['data_in'], {'columns': 'columns', 'index': 'root_index'}), '(data_in, columns=columns, index=root_index)\n', (12791, 12835), True, 'import pandas as pd\n'), ((3421, 3450), 'rootpy.io.root_open', 'root_open', (['filename'], {'mode': '"""a"""'}), "(filename, mode='a')\n", (3430, 3450), False, 'from rootpy.io import root_open\n'), ((9715, 9748), 'uproot.open', 'uproot.open', (["data_in['filenames']"], {}), "(data_in['filenames'])\n", (9726, 9748), False, 'import uproot\n'), ((10633, 10655), 'copy.deepcopy', 'copy.deepcopy', (['data_in'], {}), '(data_in)\n', (10646, 10655), False, 'import copy\n'), ((11365, 11409), 'warnings.warn', 'warnings.warn', (['"""Could not force float array"""'], {}), "('Could not force float array')\n", (11378, 11409), False, 'import warnings\n'), ((13503, 13536), 'uproot.open', 'uproot.open', (["data_in['filenames']"], {}), "(data_in['filenames'])\n", (13514, 13536), False, 'import uproot\n'), ((18030, 18044), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18041, 18044), False, 'import pickle\n'), ((17057, 17111), 'pickle.dump', 'pickle.dump', (['return_value', 'f', 'meta_cfg.PICKLE_PROTOCOL'], {}), '(return_value, f, meta_cfg.PICKLE_PROTOCOL)\n', (17068, 17111), False, 'import pickle\n')] |
"""
delete all .pyc bytecode files in a directory tree: use the
command line arg as root if given, else current working dir
"""
import os, sys
findonly = False
rootdir = os.getcwd() if len(sys.argv) == 1 else sys.argv[1]
found = removed = 0
for (thisDirLevel, subsHere, filesHere) in os.walk(rootdir):
for filename in filesHere:
if filename.endswith('.pyc'):
fullname = os.path.join(thisDirLevel, filename)
print('=>', fullname)
if not findonly:
try:
os.remove(fullname)
removed += 1
except:
type, inst = sys.exc_info()[:2]
print('*'*4, 'Failed:', filename, type, inst)
found += 1
print('Found', found, 'files, removed', removed)
| [
"os.path.join",
"os.getcwd",
"sys.exc_info",
"os.walk",
"os.remove"
]
| [((286, 302), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (293, 302), False, 'import os, sys\n'), ((171, 182), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (180, 182), False, 'import os, sys\n'), ((396, 432), 'os.path.join', 'os.path.join', (['thisDirLevel', 'filename'], {}), '(thisDirLevel, filename)\n', (408, 432), False, 'import os, sys\n'), ((539, 558), 'os.remove', 'os.remove', (['fullname'], {}), '(fullname)\n', (548, 558), False, 'import os, sys\n'), ((649, 663), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (661, 663), False, 'import os, sys\n')] |
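# A compact alternative sketch using only the standard library's pathlib; shown for
# comparison with the os.walk() version above and limited to the "find only" case.
import pathlib, sys
root = pathlib.Path(sys.argv[1] if len(sys.argv) > 1 else '.')
for pyc in root.rglob('*.pyc'):
    print('=>', pyc)
    # pyc.unlink()  # uncomment to actually delete the bytecode file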
import boto3
from logger import logger
class States:
def __init__(self, boto3_session=None):
self.boto3_session = boto3_session or boto3.Session()
self.client = self.boto3_session.client('stepfunctions')
def fail(self, task_token, error, cause):
params = dict(taskToken=task_token, error=error, cause=cause)
logger.info('SEND TASK FAILURE %s', logger.json(params))
return self.client.send_task_failure(**params)
def heartbeat(self, task_token):
params = dict(taskToken=task_token)
logger.info('SEND TASK HEARTBEAT %s', logger.json(params))
return self.client.send_task_heartbeat(**params)
def succeed(self, task_token, output):
params = dict(taskToken=task_token, output=output)
logger.info('SEND TASK SUCCESS %s', logger.json(params))
return self.client.send_task_success(**params)
| [
"boto3.Session",
"logger.logger.json"
]
| [((146, 161), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (159, 161), False, 'import boto3\n'), ((388, 407), 'logger.logger.json', 'logger.json', (['params'], {}), '(params)\n', (399, 407), False, 'from logger import logger\n'), ((592, 611), 'logger.logger.json', 'logger.json', (['params'], {}), '(params)\n', (603, 611), False, 'from logger import logger\n'), ((817, 836), 'logger.logger.json', 'logger.json', (['params'], {}), '(params)\n', (828, 836), False, 'from logger import logger\n')] |
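# Usage sketch for the States wrapper above; the task token would normally come from a
# Step Functions ".waitForTaskToken" integration event, so the value here is only a
# placeholder and the calls will fail against a real backend without a valid token.
if __name__ == '__main__':
    states = States()
    task_token = '<task-token-from-the-step-functions-event>'
    states.heartbeat(task_token)
    states.succeed(task_token, output='{"status": "done"}')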
from typing import Any, Dict, Optional, Type, Union
from cx_const import Light, PredefinedActionsMapping
from cx_core.color_helper import get_color_wheel
from cx_core.controller import action
from cx_core.feature_support.light import LightSupport
from cx_core.integration import EventData
from cx_core.integration.deconz import DeCONZIntegration
from cx_core.integration.z2m import Z2MIntegration
from cx_core.release_hold_controller import ReleaseHoldController
from cx_core.stepper import Stepper
from cx_core.stepper.circular_stepper import CircularStepper
from cx_core.stepper.minmax_stepper import MinMaxStepper
from cx_core.type_controller import Entity, TypeController
DEFAULT_MANUAL_STEPS = 10
DEFAULT_AUTOMATIC_STEPS = 10
DEFAULT_MIN_BRIGHTNESS = 1
DEFAULT_MAX_BRIGHTNESS = 255
DEFAULT_MIN_WHITE_VALUE = 1
DEFAULT_MAX_WHITE_VALUE = 255
DEFAULT_MIN_COLOR_TEMP = 153
DEFAULT_MAX_COLOR_TEMP = 500
DEFAULT_TRANSITION = 300
DEFAULT_ADD_TRANSITION = True
DEFAULT_TRANSITION_TURN_TOGGLE = False
ColorMode = str
# Once the minimum supported version of Python is 3.8,
# we can declare the ColorMode as a Literal
# ColorMode = Literal["auto", "xy_color", "color_temp"]
class LightEntity(Entity):
color_mode: ColorMode
def __init__(self, name: str, color_mode: ColorMode = "auto") -> None:
super().__init__(name)
self.color_mode = color_mode
class LightController(TypeController[LightEntity], ReleaseHoldController):
"""
This is the main class that controls the lights for different devices.
Type of actions:
- On/Off/Toggle
- Brightness click and hold
- Color temperature click and hold
- xy color click and hold
If a light supports xy_color and color_temperature, then xy_color will be the
default functionality. Parameters taken:
- controller (required): Inherited from Controller
- light (required): This is either the light entity name or a dictionary as
{name: string, color_mode: auto | xy_color | color_temp}
- delay (optional): Inherited from ReleaseHoldController
- manual_steps (optional): Number of steps to go from min to max when clicking.
- automatic_steps (optional): Number of steps to go from min to max when smoothing.
"""
ATTRIBUTE_BRIGHTNESS = "brightness"
ATTRIBUTE_WHITE_VALUE = "white_value"
# With the following attribute, it will select color_temp or xy_color, depending on the light.
ATTRIBUTE_COLOR = "color"
ATTRIBUTE_COLOR_TEMP = "color_temp"
ATTRIBUTE_XY_COLOR = "xy_color"
index_color = 0
value_attribute = None
# These are intermediate variables to store the checked value
smooth_power_on_check: bool
remove_transition_check: bool
domains = ["light"]
entity_arg = "light"
async def init(self) -> None:
manual_steps = self.args.get("manual_steps", DEFAULT_MANUAL_STEPS)
automatic_steps = self.args.get("automatic_steps", DEFAULT_AUTOMATIC_STEPS)
self.min_brightness = self.args.get("min_brightness", DEFAULT_MIN_BRIGHTNESS)
self.max_brightness = self.args.get("max_brightness", DEFAULT_MAX_BRIGHTNESS)
self.min_white_value = self.args.get("min_white_value", DEFAULT_MIN_WHITE_VALUE)
self.max_white_value = self.args.get("max_white_value", DEFAULT_MAX_WHITE_VALUE)
self.min_color_temp = self.args.get("min_color_temp", DEFAULT_MIN_COLOR_TEMP)
self.max_color_temp = self.args.get("max_color_temp", DEFAULT_MAX_COLOR_TEMP)
self.transition = self.args.get("transition", DEFAULT_TRANSITION)
self.color_wheel = get_color_wheel(
self.args.get("color_wheel", "default_color_wheel")
)
color_stepper = CircularStepper(
0, len(self.color_wheel) - 1, len(self.color_wheel)
)
self.manual_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, manual_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, manual_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, manual_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.automatic_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, automatic_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, automatic_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, automatic_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.smooth_power_on = self.args.get(
"smooth_power_on", self.supports_smooth_power_on()
)
self.add_transition = self.args.get("add_transition", DEFAULT_ADD_TRANSITION)
self.add_transition_turn_toggle = self.args.get(
"add_transition_turn_toggle", DEFAULT_TRANSITION_TURN_TOGGLE
)
await super().init()
def _get_entity_type(self) -> Type[LightEntity]:
return LightEntity
def get_predefined_actions_mapping(self) -> PredefinedActionsMapping:
return {
Light.ON: self.on,
Light.OFF: self.off,
Light.TOGGLE: self.toggle,
Light.TOGGLE_FULL_BRIGHTNESS: (
self.toggle_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_FULL_WHITE_VALUE: (
self.toggle_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_FULL_COLOR_TEMP: (
self.toggle_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.TOGGLE_MIN_BRIGHTNESS: (
self.toggle_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_MIN_WHITE_VALUE: (
self.toggle_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_MIN_COLOR_TEMP: (
self.toggle_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.RELEASE: self.release,
Light.ON_FULL_BRIGHTNESS: (
self.on_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_FULL_WHITE_VALUE: (
self.on_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_FULL_COLOR_TEMP: (
self.on_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.ON_MIN_BRIGHTNESS: (
self.on_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_MIN_WHITE_VALUE: (
self.on_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_MIN_COLOR_TEMP: (
self.on_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.SET_HALF_BRIGHTNESS: (
self.set_value,
(
LightController.ATTRIBUTE_BRIGHTNESS,
0.5,
),
),
Light.SET_HALF_WHITE_VALUE: (
self.set_value,
(
LightController.ATTRIBUTE_WHITE_VALUE,
0.5,
),
),
Light.SET_HALF_COLOR_TEMP: (
self.set_value,
(
LightController.ATTRIBUTE_COLOR_TEMP,
0.5,
),
),
Light.SYNC: self.sync,
Light.CLICK_BRIGHTNESS_UP: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.CLICK_BRIGHTNESS_DOWN: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.CLICK_WHITE_VALUE_UP: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.CLICK_WHITE_VALUE_DOWN: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.CLICK_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_TEMP_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.CLICK_COLOR_TEMP_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.CLICK_XY_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.CLICK_XY_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_UP: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.HOLD_BRIGHTNESS_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.TOGGLE,
),
),
Light.HOLD_WHITE_VALUE_UP: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.HOLD_WHITE_VALUE_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.HOLD_WHITE_VALUE_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.HOLD_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_TEMP_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.HOLD_COLOR_TEMP_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TEMP_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.TOGGLE,
),
),
Light.HOLD_XY_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.HOLD_XY_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_XY_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.TOGGLE,
),
),
Light.XYCOLOR_FROM_CONTROLLER: self.xycolor_from_controller,
Light.COLORTEMP_FROM_CONTROLLER: self.colortemp_from_controller,
}
async def check_remove_transition(self, on_from_user: bool) -> bool:
return (
not self.add_transition
or (on_from_user and not self.add_transition_turn_toggle)
or await self.feature_support.not_supported(LightSupport.TRANSITION)
)
async def call_light_service(self, service: str, **attributes) -> None:
if "transition" not in attributes:
attributes["transition"] = self.transition / 1000
if self.remove_transition_check:
del attributes["transition"]
await self.call_service(service, entity_id=self.entity.name, **attributes)
async def _on(self, **attributes) -> None:
await self.call_light_service("light/turn_on", **attributes)
@action
async def on(self, **attributes) -> None:
await self._on(**attributes)
async def _off(self, **attributes) -> None:
await self.call_light_service("light/turn_off", **attributes)
@action
async def off(self, **attributes) -> None:
await self._off(**attributes)
async def _toggle(self, **attributes) -> None:
await self.call_light_service("light/toggle", **attributes)
@action
async def toggle(self, **attributes) -> None:
await self._toggle(**attributes)
async def _set_value(self, attribute: str, fraction: float) -> None:
fraction = max(0, min(fraction, 1))
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
min_ = stepper.minmax.min
max_ = stepper.minmax.max
value = (max_ - min_) * fraction + min_
await self._on(**{attribute: value})
@action
async def set_value(self, attribute: str, fraction: float) -> None:
await self._set_value(attribute, fraction)
@action
async def toggle_full(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.max})
@action
async def toggle_min(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.min})
async def _on_full(self, attribute: str) -> None:
await self._set_value(attribute, 1)
@action
async def on_full(self, attribute: str) -> None:
await self._on_full(attribute)
async def _on_min(self, attribute: str) -> None:
await self._set_value(attribute, 0)
@action
async def on_min(self, attribute: str) -> None:
await self._on_min(attribute)
@action
async def sync(self) -> None:
attributes: Dict[Any, Any] = {}
try:
color_attribute = await self.get_attribute(LightController.ATTRIBUTE_COLOR)
if color_attribute == LightController.ATTRIBUTE_COLOR_TEMP:
attributes[color_attribute] = 370 # 2700K light
else:
attributes[color_attribute] = (0.323, 0.329) # white colour
except ValueError:
self.log(
"⚠️ `sync` action will only change brightness",
level="WARNING",
ascii_encode=False,
)
await self._on(**attributes, brightness=self.max_brightness)
@action
async def xycolor_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color" not in extra:
self.log(
"`action_color` is not present in the MQTT payload", level="WARNING"
)
return
xy_color = extra["action_color"]
await self._on(xy_color=(xy_color["x"], xy_color["y"]))
elif isinstance(self.integration, DeCONZIntegration):
if "xy" not in extra:
self.log("`xy` is not present in the deCONZ event", level="WARNING")
return
await self._on(xy_color=extra["xy"])
@action
async def colortemp_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color_temperature" not in extra:
self.log(
"`action_color_temperature` is not present in the MQTT payload",
level="WARNING",
)
return
await self._on(color_temp=extra["action_color_temperature"])
async def get_attribute(self, attribute: str) -> str:
if attribute == LightController.ATTRIBUTE_COLOR:
if self.entity.color_mode == "auto":
if await self.feature_support.is_supported(LightSupport.COLOR):
return LightController.ATTRIBUTE_XY_COLOR
elif await self.feature_support.is_supported(LightSupport.COLOR_TEMP):
return LightController.ATTRIBUTE_COLOR_TEMP
else:
raise ValueError(
"This light does not support xy_color or color_temp"
)
else:
return self.entity.color_mode
else:
return attribute
async def get_value_attribute(self, attribute: str) -> Union[float, int]:
if self.smooth_power_on_check:
return 0
if attribute == LightController.ATTRIBUTE_XY_COLOR:
return 0
elif (
attribute == LightController.ATTRIBUTE_BRIGHTNESS
or attribute == LightController.ATTRIBUTE_WHITE_VALUE
or attribute == LightController.ATTRIBUTE_COLOR_TEMP
):
value = await self.get_entity_state(self.entity.name, attribute)
if value is None:
raise ValueError(
f"Value for `{attribute}` attribute could not be retrieved "
f"from `{self.entity.name}`. "
"Check the FAQ to know more about this error: "
"https://xaviml.github.io/controllerx/faq"
)
else:
try:
return float(value)
except ValueError:
raise ValueError(
f"Attribute `{attribute}` with `{value}` as a value "
"could not be converted to float"
)
else:
raise ValueError(f"Attribute `{attribute}` not expected")
def check_smooth_power_on(
self, attribute: str, direction: str, light_state: str
) -> bool:
return (
direction != Stepper.DOWN
and attribute == self.ATTRIBUTE_BRIGHTNESS
and self.smooth_power_on
and light_state == "off"
)
async def before_action(self, action: str, *args, **kwargs) -> bool:
to_return = True
if action in ("click", "hold"):
attribute, direction = args
light_state: str = await self.get_entity_state(self.entity.name)
self.smooth_power_on_check = self.check_smooth_power_on(
attribute, direction, light_state
)
self.remove_transition_check = await self.check_remove_transition(
on_from_user=False
)
to_return = (light_state == "on") or self.smooth_power_on_check
else:
self.remove_transition_check = await self.check_remove_transition(
on_from_user=True
)
self.smooth_power_on_check = False
return await super().before_action(action, *args, **kwargs) and to_return
@action
async def click(self, attribute: str, direction: str) -> None:
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.manual_steppers[attribute],
"click",
)
@action
async def hold(self, attribute: str, direction: str) -> None: # type: ignore
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
self.log(
f"Attribute value before running the hold action: {self.value_attribute}",
level="DEBUG",
)
if direction == Stepper.TOGGLE:
self.log(
f"Previous direction: {self.automatic_steppers[attribute].previous_direction}",
level="DEBUG",
)
direction = self.automatic_steppers[attribute].get_direction(
self.value_attribute, direction
)
self.log(f"Going direction: {direction}", level="DEBUG")
await super().hold(attribute, direction)
async def hold_loop(self, attribute: str, direction: str) -> bool: # type: ignore
if self.value_attribute is None:
return True
return await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.automatic_steppers[attribute],
"hold",
)
async def change_light_state(
self,
old: float,
attribute: str,
direction: str,
stepper: Stepper,
action_type: str,
) -> bool:
"""
This functions changes the state of the light depending on the previous
value and attribute. It returns True when no more changes will need to be done.
Otherwise, it returns False.
"""
attributes: Dict[str, Any]
if attribute == LightController.ATTRIBUTE_XY_COLOR:
index_color, _ = stepper.step(self.index_color, direction)
self.index_color = int(index_color)
xy_color = self.color_wheel[self.index_color]
attributes = {attribute: xy_color}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
# In case of xy_color mode it never finishes the loop, the hold loop
# will only stop if the hold action is called when releasing the button.
# I haven't experimented any problems with it, but a future implementation
# would be to force the loop to stop after 4 or 5 loops as a safety measure.
return False
if self.smooth_power_on_check:
await self._on_min(attribute)
            # After smooth power on, the light should not brighten up.
return True
new_state_attribute, exceeded = stepper.step(old, direction)
new_state_attribute = round(new_state_attribute, 3)
attributes = {attribute: new_state_attribute}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
self.value_attribute = new_state_attribute
return exceeded
def supports_smooth_power_on(self) -> bool:
"""
        This function can be overridden for each device to indicate the default behaviour of the controller
when the associated light is off and an event for incrementing brightness is received.
Returns True if the associated light should be turned on with minimum brightness if an event for incrementing
brightness is received, while the lamp is off.
The behaviour can be overridden by the user with the 'smooth_power_on' option in app configuration.
"""
return False
| [
"cx_core.stepper.minmax_stepper.MinMaxStepper"
]
| [((3924, 3993), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_brightness', 'self.max_brightness', 'manual_steps'], {}), '(self.min_brightness, self.max_brightness, manual_steps)\n', (3937, 3993), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n'), ((4076, 4147), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_white_value', 'self.max_white_value', 'manual_steps'], {}), '(self.min_white_value, self.max_white_value, manual_steps)\n', (4089, 4147), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n'), ((4229, 4298), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_color_temp', 'self.max_color_temp', 'manual_steps'], {}), '(self.min_color_temp, self.max_color_temp, manual_steps)\n', (4242, 4298), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n'), ((4509, 4581), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_brightness', 'self.max_brightness', 'automatic_steps'], {}), '(self.min_brightness, self.max_brightness, automatic_steps)\n', (4522, 4581), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n'), ((4664, 4738), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_white_value', 'self.max_white_value', 'automatic_steps'], {}), '(self.min_white_value, self.max_white_value, automatic_steps)\n', (4677, 4738), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n'), ((4820, 4892), 'cx_core.stepper.minmax_stepper.MinMaxStepper', 'MinMaxStepper', (['self.min_color_temp', 'self.max_color_temp', 'automatic_steps'], {}), '(self.min_color_temp, self.max_color_temp, automatic_steps)\n', (4833, 4892), False, 'from cx_core.stepper.minmax_stepper import MinMaxStepper\n')] |
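# Hedged sketch of the stepper behaviour the controller above relies on: a MinMaxStepper is
# built from (min, max, steps) and stepped with Stepper.UP / Stepper.DOWN, returning the new
# value plus an "exceeded" flag, mirroring what change_light_state() does for brightness.
from cx_core.stepper import Stepper
from cx_core.stepper.minmax_stepper import MinMaxStepper

brightness_stepper = MinMaxStepper(1, 255, 10)  # DEFAULT_MIN/MAX_BRIGHTNESS, 10 automatic steps
value = 128
value, exceeded = brightness_stepper.step(value, Stepper.UP)
print(value, exceeded)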
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
def load_arguments(self, _):
for scope in ['alert',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['auto-provisioning-setting update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'auto_provision',
validator=validate_auto_provisioning_toggle,
arg_type=auto_provisioning_auto_provision_arg_type)
for scope in ['contact create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'email',
arg_type=contact_email_arg_type)
c.argument(
'phone',
arg_type=contact_phone_arg_type)
c.argument(
'alert_notifications',
arg_type=contact_alert_notifications_arg_type)
c.argument(
'alerts_admins',
arg_type=contact_alerts_admins_arg_type)
for scope in ['pricing create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'tier',
validator=validate_pricing_tier,
arg_type=pricing_tier_arg_type)
for scope in ['workspace-setting create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'target_workspace',
arg_type=workspace_setting_target_workspace_arg_type)
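# Illustrative usage (not part of the original module): with the registrations above, a
# command such as the following would exercise the pricing-tier argument, and
# validate_pricing_tier runs before the value reaches the command handler:
#   az security pricing create -n <resource-name> --tier standard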
| [
"knack.arguments.CLIArgumentType"
]
| [((671, 781), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': "('--name', '-n')", 'metavar': '"""NAME"""', 'help': '"""name of the resource to be fetched"""'}), "(options_list=('--name', '-n'), metavar='NAME', help=\n 'name of the resource to be fetched')\n", (686, 781), False, 'from knack.arguments import CLIArgumentType\n'), ((800, 939), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': "('--home-region', '-hr')", 'metavar': '"""HOMEREGION"""', 'help': '"""home region that was selected for the subscription"""'}), "(options_list=('--home-region', '-hr'), metavar='HOMEREGION',\n help='home region that was selected for the subscription')\n", (815, 939), False, 'from knack.arguments import CLIArgumentType\n'), ((956, 1064), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': "('--location', '-l')", 'metavar': '"""LOCATION"""', 'help': '"""location of the resource"""'}), "(options_list=('--location', '-l'), metavar='LOCATION', help\n ='location of the resource')\n", (971, 1064), False, 'from knack.arguments import CLIArgumentType\n'), ((1094, 1238), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--status"""', 'metavar': '"""STATUS"""', 'help': '"""target status of the alert. possible values are "dismiss" and "activate\""""'}), '(options_list=\'--status\', metavar=\'STATUS\', help=\n \'target status of the alert. possible values are "dismiss" and "activate"\')\n', (1109, 1238), False, 'from knack.arguments import CLIArgumentType\n'), ((1301, 1451), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--auto-provision"""', 'metavar': '"""AUTOPROVISION"""', 'help': '"""Automatic provisioning toggle. possible values are "on" or "off\""""'}), '(options_list=\'--auto-provision\', metavar=\'AUTOPROVISION\',\n help=\'Automatic provisioning toggle. possible values are "on" or "off"\')\n', (1316, 1451), False, 'from knack.arguments import CLIArgumentType\n'), ((1487, 1587), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--email"""', 'metavar': '"""EMAIL"""', 'help': '"""E-mail of the security contact"""'}), "(options_list='--email', metavar='EMAIL', help=\n 'E-mail of the security contact')\n", (1502, 1587), False, 'from knack.arguments import CLIArgumentType\n'), ((1610, 1709), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--phone"""', 'metavar': '"""PHONE"""', 'help': '"""Phone of the security contact"""'}), "(options_list='--phone', metavar='PHONE', help=\n 'Phone of the security contact')\n", (1625, 1709), False, 'from knack.arguments import CLIArgumentType\n'), ((1746, 1907), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--alert-notifications"""', 'metavar': '"""ALERTNOTIFICATIONS"""', 'help': '"""Whether to send mail notifications to the security contacts"""'}), "(options_list='--alert-notifications', metavar=\n 'ALERTNOTIFICATIONS', help=\n 'Whether to send mail notifications to the security contacts')\n", (1761, 1907), False, 'from knack.arguments import CLIArgumentType\n'), ((1933, 2086), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--alerts-admins"""', 'metavar': '"""ALERTADMINS"""', 'help': '"""Whether to send mail notifications to the subscription administrators"""'}), "(options_list='--alerts-admins', metavar='ALERTADMINS', help\n ='Whether to send mail notifications to the subscription administrators')\n", (1948, 2086), False, 'from knack.arguments import CLIArgumentType\n'), ((2119, 2204), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--tier"""', 'metavar': '"""TIER"""', 'help': '"""pricing tier type"""'}), "(options_list='--tier', metavar='TIER', help='pricing tier type'\n )\n", (2134, 2204), False, 'from knack.arguments import CLIArgumentType\n'), ((2270, 2430), 'knack.arguments.CLIArgumentType', 'CLIArgumentType', ([], {'options_list': '"""--target-workspace"""', 'metavar': '"""TARGETWORKSPACE"""', 'help': '"""An ID of the workspace resource that will hold the security data"""'}), "(options_list='--target-workspace', metavar=\n 'TARGETWORKSPACE', help=\n 'An ID of the workspace resource that will hold the security data')\n", (2285, 2430), False, 'from knack.arguments import CLIArgumentType\n')]
import unittest
import torch
from parameterized import parameterized
from src.constructor import create_backbone
from src.models.backbones.utils import list_models
from .test_segmentation import example_backbones
def inp(bsize, in_ch, w, h):
    return torch.ones(bsize, in_ch, w, h)
class TestBackboneCorrectness(unittest.TestCase):
    def setUp(self) -> None:
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # self.input is used by the tests below; the 2x3x224x224 shape is an assumption
        # (the original setUp did not define it) chosen so the torchscript tests can run.
        self.input = inp(2, 3, 224, 224).to(self.device)
    @parameterized.expand(list_models(module='vision_transformer', exclude_filters=''))
    def test_vit_torchscript_conversion(self, backbone_name):
        model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
        with torch.no_grad():
            torch.jit.trace(model, self.input)
        torch.cuda.empty_cache()
    @parameterized.expand(list_models(module='coat', exclude_filters=''))
    def test_coat_torchscript_conversion(self, backbone_name):
        model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
        with torch.no_grad():
            torch.jit.trace(model, self.input)
        torch.cuda.empty_cache()
    @parameterized.expand(list_models(module='swin_transformer', exclude_filters=''))
    def test_swin_torchscript_conversion(self, backbone_name):
        model = create_backbone(backbone_name).to(self.device).eval()
        input = torch.rand(2, 3, *model.img_size, device=self.device)
        with torch.no_grad():
            torch.jit.trace(model, input)
        torch.cuda.empty_cache()
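# Convenience entry point (an addition; the original file relied on an external runner such
# as pytest, e.g. `python -m pytest -k torchscript`):
if __name__ == '__main__':
    unittest.main()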
| [
"torch.jit.trace",
"src.constructor.create_backbone",
"src.models.backbones.utils.list_models",
"torch.cuda.is_available",
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.rand",
"torch.ones"
]
| [((257, 287), 'torch.ones', 'torch.ones', (['bsize', 'in_ch', 'w', 'h'], {}), '(bsize, in_ch, w, h)\n', (267, 287), False, 'import torch\n'), ((790, 814), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (812, 814), False, 'import torch\n'), ((481, 541), 'src.models.backbones.utils.list_models', 'list_models', ([], {'module': '"""vision_transformer"""', 'exclude_filters': '""""""'}), "(module='vision_transformer', exclude_filters='')\n", (492, 541), False, 'from src.models.backbones.utils import list_models\n'), ((1138, 1162), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1160, 1162), False, 'import torch\n'), ((842, 888), 'src.models.backbones.utils.list_models', 'list_models', ([], {'module': '"""coat"""', 'exclude_filters': '""""""'}), "(module='coat', exclude_filters='')\n", (853, 888), False, 'from src.models.backbones.utils import list_models\n'), ((1399, 1452), 'torch.rand', 'torch.rand', (['(2)', '(3)', '*model.img_size'], {'device': 'self.device'}), '(2, 3, *model.img_size, device=self.device)\n', (1409, 1452), False, 'import torch\n'), ((1533, 1557), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1555, 1557), False, 'import torch\n'), ((1190, 1248), 'src.models.backbones.utils.list_models', 'list_models', ([], {'module': '"""swin_transformer"""', 'exclude_filters': '""""""'}), "(module='swin_transformer', exclude_filters='')\n", (1201, 1248), False, 'from src.models.backbones.utils import list_models\n'), ((718, 733), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (731, 733), False, 'import torch\n'), ((747, 781), 'torch.jit.trace', 'torch.jit.trace', (['model', 'self.input'], {}), '(model, self.input)\n', (762, 781), False, 'import torch\n'), ((1066, 1081), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1079, 1081), False, 'import torch\n'), ((1095, 1129), 'torch.jit.trace', 'torch.jit.trace', (['model', 'self.input'], {}), '(model, self.input)\n', (1110, 1129), False, 'import torch\n'), ((1466, 1481), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1479, 1481), False, 'import torch\n'), ((1495, 1524), 'torch.jit.trace', 'torch.jit.trace', (['model', 'input'], {}), '(model, input)\n', (1510, 1524), False, 'import torch\n'), ((416, 441), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (439, 441), False, 'import torch\n'), ((621, 681), 'src.constructor.create_backbone', 'create_backbone', (['backbone_name'], {'img_size': 'self.input.shape[2]'}), '(backbone_name, img_size=self.input.shape[2])\n', (636, 681), False, 'from src.constructor import create_backbone\n'), ((969, 1029), 'src.constructor.create_backbone', 'create_backbone', (['backbone_name'], {'img_size': 'self.input.shape[2]'}), '(backbone_name, img_size=self.input.shape[2])\n', (984, 1029), False, 'from src.constructor import create_backbone\n'), ((1329, 1359), 'src.constructor.create_backbone', 'create_backbone', (['backbone_name'], {}), '(backbone_name)\n', (1344, 1359), False, 'from src.constructor import create_backbone\n')] |
import nltk
import json
import plotly
import pandas as pd
import plotly.graph_objects as go
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','wordnet'])
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
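# Illustrative behaviour of tokenize() (example values are an assumption, not from the original file):
#   tokenize("houses were flooded") -> ['house', 'were', 'flooded']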
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("models/model.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# Viz 1
genre = df.groupby('genre').count()['id'].sort_values()
# Viz 2
df['text length'] = df['message'].apply(lambda x: len(x.split()))
histogram = df[df['text length'] < 100].groupby('text length').count()['id']
# Viz 3
total_category = df.drop(columns=['id','message','original','genre', 'text length']).sum().sort_values(ascending=False).head(5)
# create visuals
graphs = [
{
'data': [
Bar(
x=genre.values,
y=genre.index,
orientation='h'
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Genre"
},
'xaxis': {
'title': "Counts"
}
}
},
{
'data': [
Bar(
x=histogram.index,
y=histogram.values
)
],
'layout': {
'title': 'Distribution of Messages Length',
'yaxis': {
'title': "Total Messages"
},
'xaxis': {
'title': "Total Words"
}
}
},
{
'data': [
Bar(
x=total_category.index,
y=total_category.values
)
],
'layout': {
'title': 'Total Messages per Category (Top 5)',
'yaxis': {
'title': "Total"
},
'xaxis': {
'title': "Category"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run()
#app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
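# Illustrative launch and query (file name and port are assumptions, not from the original file):
#   python <this_file>.py                         # starts the Flask dev server via main()
#   http://localhost:5000/go?query=we+need+water  # classifies the message and renders go.html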
| [
"flask.render_template",
"flask.request.args.get",
"nltk.download",
"flask.Flask",
"sqlalchemy.create_engine",
"json.dumps",
"nltk.stem.WordNetLemmatizer",
"nltk.tokenize.word_tokenize",
"plotly.graph_objs.Bar",
"joblib.load",
"pandas.read_sql_table"
]
| [((172, 207), 'nltk.download', 'nltk.download', (["['punkt', 'wordnet']"], {}), "(['punkt', 'wordnet'])\n", (185, 207), False, 'import nltk\n'), ((388, 403), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (393, 403), False, 'from flask import Flask\n'), ((689, 740), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///data/DisasterResponse.db"""'], {}), "('sqlite:///data/DisasterResponse.db')\n", (702, 740), False, 'from sqlalchemy import create_engine\n'), ((746, 783), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""messages"""', 'engine'], {}), "('messages', engine)\n", (763, 783), True, 'import pandas as pd\n'), ((806, 837), 'joblib.load', 'joblib.load', (['"""models/model.pkl"""'], {}), "('models/model.pkl')\n", (817, 837), False, 'import joblib\n'), ((438, 457), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (451, 457), False, 'from nltk.tokenize import word_tokenize\n'), ((475, 494), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (492, 494), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2936, 2990), 'json.dumps', 'json.dumps', (['graphs'], {'cls': 'plotly.utils.PlotlyJSONEncoder'}), '(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n', (2946, 2990), False, 'import json\n'), ((3048, 3108), 'flask.render_template', 'render_template', (['"""master.html"""'], {'ids': 'ids', 'graphJSON': 'graphJSON'}), "('master.html', ids=ids, graphJSON=graphJSON)\n", (3063, 3108), False, 'from flask import render_template, request, jsonify\n'), ((3244, 3273), 'flask.request.args.get', 'request.args.get', (['"""query"""', '""""""'], {}), "('query', '')\n", (3260, 3273), False, 'from flask import render_template, request, jsonify\n'), ((3530, 3620), 'flask.render_template', 'render_template', (['"""go.html"""'], {'query': 'query', 'classification_result': 'classification_results'}), "('go.html', query=query, classification_result=\n classification_results)\n", (3545, 3620), False, 'from flask import render_template, request, jsonify\n'), ((1480, 1531), 'plotly.graph_objs.Bar', 'Bar', ([], {'x': 'genre.values', 'y': 'genre.index', 'orientation': '"""h"""'}), "(x=genre.values, y=genre.index, orientation='h')\n", (1483, 1531), False, 'from plotly.graph_objs import Bar, Histogram\n'), ((1949, 1991), 'plotly.graph_objs.Bar', 'Bar', ([], {'x': 'histogram.index', 'y': 'histogram.values'}), '(x=histogram.index, y=histogram.values)\n', (1952, 1991), False, 'from plotly.graph_objs import Bar, Histogram\n'), ((2404, 2456), 'plotly.graph_objs.Bar', 'Bar', ([], {'x': 'total_category.index', 'y': 'total_category.values'}), '(x=total_category.index, y=total_category.values)\n', (2407, 2456), False, 'from plotly.graph_objs import Bar, Histogram\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
class MXReshapeReverse(FrontReplacementOp):
"""
If reshape layer with reverse True, special values will inferred from right to left.
The Replacer simulate the behavior. The replaced subgraph reverse input data and special dims,
and after reshape reverse output result to backward.
Resulting subgraph: reshape(reverse=True) -> reverse - reshape(reverse=False) -reverse subgraph.
"""
op = 'MXReshape'
enabled = True
def run_before(self):
return [MXReshapeToReshape]
def replace_sub_graph(self, graph: Graph, match: dict):
mxreshape = match['op']
if not mxreshape.reverse:
return
shape_node = Shape(graph, dict(name=mxreshape.id + '/Shape')).create_node()
forward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardUnsqueeze'))
forward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/ForwardReverse', axis=1)).create_node()
forward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardSqueeze'))
reshape_node = Reshape(graph, dict(name=mxreshape.id + '/Reshape')).create_node()
shape_node.in_port(0).connect(mxreshape.in_port(0).get_source())
mxreshape.in_port(0).get_connection().set_destination(reshape_node.in_port(0))
forward_reverse_unsqueeze_node.in_port(0).connect(shape_node.out_port(0))
forward_reverse_node.in_port(0).connect(forward_reverse_unsqueeze_node.out_port(0))
forward_reverse_squeeze_node.in_port(0).connect(forward_reverse_node.out_port(0))
reshape_node.in_port(1).connect(forward_reverse_squeeze_node.out_port(0))
reshape_shape_node = create_op_node_with_second_input(graph, Reshape, int64_array(np.flip(mxreshape.dim, 0)),
dict(name=str(mxreshape.id) + '/ReshapeShape'))
if np.sum(np.in1d([-2, -3, -4], mxreshape.dim), axis=0):
reshape_shape_node = MXReshape(graph, dict(name=mxreshape.id + '/Reshape',
dim=int64_array(np.flip(mxreshape.dim, 0)))).create_node()
reshape_shape_node.in_port(0).connect(reshape_node.out_port(0))
backward_shape_node = Shape(graph, dict(name=mxreshape.id + '/BackwardShape')).create_node()
backward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardUnsqueeze'))
backward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/BackwardReverse', axis=1)).create_node()
backward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardSqueeze'))
backward_reshape_node = Reshape(graph, dict(name=mxreshape.id + '/BackwardReshape')).create_node()
backward_shape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reverse_unsqueeze_node.in_port(0).connect(backward_shape_node.out_port(0))
backward_reverse_node.in_port(0).connect(backward_reverse_unsqueeze_node.out_port(0))
backward_reverse_squeeze_node.in_port(0).connect(backward_reverse_node.out_port(0))
backward_reshape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reshape_node.in_port(1).connect(backward_reverse_squeeze_node.out_port(0))
mxreshape.out_port(0).get_connection().set_source(backward_reshape_node.out_port(0))
| [
"numpy.in1d",
"numpy.flip",
"openvino.tools.mo.front.common.partial_infer.utils.int64_array"
]
| [((1606, 1622), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (1617, 1622), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((1952, 1968), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (1963, 1968), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2936, 2972), 'numpy.in1d', 'np.in1d', (['[-2, -3, -4]', 'mxreshape.dim'], {}), '([-2, -3, -4], mxreshape.dim)\n', (2943, 2972), True, 'import numpy as np\n'), ((3434, 3450), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (3445, 3450), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((3784, 3800), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (3795, 3800), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2780, 2805), 'numpy.flip', 'np.flip', (['mxreshape.dim', '(0)'], {}), '(mxreshape.dim, 0)\n', (2787, 2805), True, 'import numpy as np\n'), ((3123, 3148), 'numpy.flip', 'np.flip', (['mxreshape.dim', '(0)'], {}), '(mxreshape.dim, 0)\n', (3130, 3148), True, 'import numpy as np\n')] |
# PassWord - The Safe Password Generator App!
# importing the tkinter module for GUI
from tkinter import *
# importing the message box widget from tkinter
from tkinter import messagebox
# importing sqlite3 for database
import sqlite3
# importing random for password generation
import random
# creating fonts
font = ('Fixedsys', 10)
font2 = ('Comic Sans MS', 9)
font3 = ('System', 9)
font4 = ('Two Cen MT', 9)
# creating a database and establishing a connection
conn = sqlite3.connect('password.db')
# creating a cursor to navigate through database
c = conn.cursor()
# creating the table
'''
c.execute("""CREATE TABLE passwords (
password text
)""")
'''
# defining the root variable
root = Tk()
# Naming the app
root.title('PassWord')
# creating a label frame to organize content
label_frame = LabelFrame(root, padx=10, pady=10, text='Password Generator', font=font)
# printing the label frame onto the screen or window
label_frame.grid(row=0, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# creating a separate label frame to perform delete functions
delete_labelframe = LabelFrame(root, text='Delete Password', padx=10, pady=10, font=font4)
# printing delete labelframe onto the screen
delete_labelframe.grid(row=5, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# making the text box where password is going to be displayed
e = Entry(label_frame, fg='black', bg='white')
# printing the text box to the screen
e.grid(row=0, column=0, padx=10, pady=10, columnspan=1)
# (for the delete function) to give information on input for delete function
info = Label(delete_labelframe, text='Password ID', fg='black', font=font2)
# printing the label onto the screen
info.grid(row=6, column=0, pady=10)
# making the entry for user to input which password
e2 = Entry(delete_labelframe, fg='black', bg='white')
# printing the entry onto the screen
e2.grid(row=6, column=1, pady=10)
# making the password generate function
def generate():
# creating lists
lowercase_letters = ['a', 'b', 'c', 'd', 'e' 'f' 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't',
'u' 'v', 'w', 'x', 'y', 'z']
# creating lists
uppercase_letters = ['A', 'B', 'C', 'D', 'E' 'F' 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U' 'V', 'W', 'X', 'Y', 'Z']
# creating lists
symbols_list = ['-', '@', '!' '$', '%' '&' '?', '#', '^']
# creating lists
numbers_list = ['1', '2', '3', '4', '5', '6', '7' '8', '9' '0']
# generating a random value from the lists
lowercase_letter = random.choice(lowercase_letters)
# generating a random value from the lists
lowercase_letter2 = random.choice(lowercase_letters)
# generating a random value from the lists
uppercase_letter = random.choice(uppercase_letters)
# generating a random value from the lists
uppercase2_letter = random.choice(uppercase_letters)
# generating a random value from the lists
symbol = random.choice(symbols_list)
# generating a random value from the lists
symbol2 = random.choice(symbols_list)
# generating a random value from the lists
number = random.choice(numbers_list)
# generating a random value from the lists
number2 = random.choice(numbers_list)
# creating a password list made of random values from previous lists
password = [lowercase_letter, uppercase_letter, uppercase2_letter, lowercase_letter2, symbol, symbol2, number,
number2]
# shuffling password list
password1 = random.sample(password, 8)
# concatenating and making final list
final_password = password1[0] + password1[1] + password1[2] + password1[3] + password1[4] + password1[5] + \
password1[6] + password1[7]
# deleting previous item from entry
e.delete(0, END)
# inserting the final password
e.insert(0, final_password)
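# Design note on generate() above (an observation, not from the original file): the eight picks
# could equivalently be combined with ''.join(random.sample(password, 8)), and for
# security-sensitive passwords the standard-library `secrets` module (e.g. secrets.choice)
# would usually be preferred over `random`.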
# making a function to save the password into the database
def save_password():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("INSERT INTO passwords VALUES (?)", (e.get(),))
e.delete(0, END)
conn.commit()
conn.close()
# making a function to show all the saved passwords
def show_password():
global passcode_label
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM passwords")
passcodes = c.fetchall()
print_code = ''
for passcode in passcodes:
print_code += str(passcode[0]) + '.' + ' ' + str(passcode[1]) + '\n'
passcode_label = Text(label_frame, height=15, width=25)
passcode_label.configure(state='normal')
passcode_label.insert(1.0, print_code)
passcode_label.grid(row=5, column=0, padx=10, pady=10)
passcode_label.configure(state='disabled')
conn.commit()
conn.close()
# making a function to hide the saved passwords
def hide_password():
passcode_label.destroy()
# making a function to delete passwords from database
def delete():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("DELETE from passwords WHERE oid = (?)", (e2.get(),))
e2.delete(0, END)
passcode_label.destroy()
conn.commit()
conn.close()
# making a function to delete all the passwords in the database
def delete_all():
global number_of_passwords
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid FROM passwords")
number_of_passwords = c.fetchall()
num_of_passwords = len(number_of_passwords)
confirmation = messagebox.askyesno('Delete All Passwords?',
'You have chosen to delete ' + str(
num_of_passwords) + ' passwords. This action cannot be reversed. Do you wish to proceed?')
if confirmation == 1:
c.execute("DELETE FROM passwords")
conn.commit()
conn.close()
# button for generating password
generate_password = Button(label_frame, text='Generate Strong Password', command=generate, font=font2)
# printing the button onto the screen
generate_password.grid(row=1, padx=10, pady=10, column=0)
# button to save password
save = Button(label_frame, text='Save Password', command=save_password, font=font2)
# printing the button onto the screen
save.grid(row=2, padx=10, pady=10, column=0)
# making a button to show all the passwords
show = Button(label_frame, text='Show Passwords', command=show_password, font=font2)
# printing the button onto the screen
show.grid(row=4, padx=10, pady=10, column=0)
# making a button to hide the shown passwords
hide = Button(label_frame, text='Hide Passwords', command=hide_password, font=font2)
# printing the button onto the screen
hide.grid(row=6, column=0, padx=10, pady=10)
# making a button to delete a password
delete = Button(delete_labelframe, text='Delete Password', command=delete, font=font2)
# printing the button onto the screen
delete.grid(row=8, padx=10, pady=10, column=1)
# making a button to delete all the passwords
delete_all = Button(delete_labelframe, text='Delete All', command=delete_all, fg='dark red', width=20, anchor=CENTER,
font=font3)
# printing the button onto the screen
delete_all.grid(row=9, column=1, padx=10, pady=10, ipadx=15)
# committing the changes to the database
conn.commit()
# closing the connection with database
conn.close()
# making the final loop
root.mainloop()
| [
"random.sample",
"random.choice",
"sqlite3.connect"
]
| [((496, 526), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (511, 526), False, 'import sqlite3\n'), ((2799, 2831), 'random.choice', 'random.choice', (['lowercase_letters'], {}), '(lowercase_letters)\n', (2812, 2831), False, 'import random\n'), ((2907, 2939), 'random.choice', 'random.choice', (['lowercase_letters'], {}), '(lowercase_letters)\n', (2920, 2939), False, 'import random\n'), ((3014, 3046), 'random.choice', 'random.choice', (['uppercase_letters'], {}), '(uppercase_letters)\n', (3027, 3046), False, 'import random\n'), ((3122, 3154), 'random.choice', 'random.choice', (['uppercase_letters'], {}), '(uppercase_letters)\n', (3135, 3154), False, 'import random\n'), ((3219, 3246), 'random.choice', 'random.choice', (['symbols_list'], {}), '(symbols_list)\n', (3232, 3246), False, 'import random\n'), ((3312, 3339), 'random.choice', 'random.choice', (['symbols_list'], {}), '(symbols_list)\n', (3325, 3339), False, 'import random\n'), ((3404, 3431), 'random.choice', 'random.choice', (['numbers_list'], {}), '(numbers_list)\n', (3417, 3431), False, 'import random\n'), ((3497, 3524), 'random.choice', 'random.choice', (['numbers_list'], {}), '(numbers_list)\n', (3510, 3524), False, 'import random\n'), ((3793, 3819), 'random.sample', 'random.sample', (['password', '(8)'], {}), '(password, 8)\n', (3806, 3819), False, 'import random\n'), ((4263, 4293), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (4278, 4293), False, 'import sqlite3\n'), ((4557, 4587), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (4572, 4587), False, 'import sqlite3\n'), ((5309, 5339), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (5324, 5339), False, 'import sqlite3\n'), ((5660, 5690), 'sqlite3.connect', 'sqlite3.connect', (['"""password.db"""'], {}), "('password.db')\n", (5675, 5690), False, 'import sqlite3\n')] |
from flask import Flask, render_template, jsonify
from reddit_handler import *
# random.choice is used below; import it explicitly rather than relying on it being
# re-exported by the wildcard import above.
import random
app = Flask(__name__)
meme_subreddits = ['izlam']
@app.route('/')
def index():
return render_template('index.html')
@app.route('/meme')
def one_post():
sub = random.choice(meme_subreddits)
re = get_posts(sub, 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return jsonify({
'title': r[0],
'url': r[1],
'postLink': r[2],
'subreddit': sub
})
@app.route('/sample')
def sample():
re = get_posts(random.choice(meme_subreddits), 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return render_template('sample.html', title=r[0], img_url=r[1], shortlink=r[2])
@app.route('/test')
def test():
re = get_posts(random.choice(meme_subreddits), 100)
return render_template('test.html', re=re)
@app.route('/<something>')
def not_found(something):
return render_template('not_found.html')
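# Illustrative shape of the JSON returned by GET /meme (field values are made up):
#   {"title": "...", "url": "https://i.redd.it/xxxx.jpg",
#    "postLink": "https://redd.it/xxxx", "subreddit": "izlam"}
# Conventional entry point (an assumption; the original may have been launched with `flask run`):
if __name__ == '__main__':
    app.run(debug=True)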
| [
"flask.render_template",
"flask.jsonify",
"flask.Flask"
]
| [((89, 104), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (94, 104), False, 'from flask import Flask, render_template, jsonify\n'), ((183, 212), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (198, 212), False, 'from flask import Flask, render_template, jsonify\n'), ((437, 510), 'flask.jsonify', 'jsonify', (["{'title': r[0], 'url': r[1], 'postLink': r[2], 'subreddit': sub}"], {}), "({'title': r[0], 'url': r[1], 'postLink': r[2], 'subreddit': sub})\n", (444, 510), False, 'from flask import Flask, render_template, jsonify\n'), ((763, 835), 'flask.render_template', 'render_template', (['"""sample.html"""'], {'title': 'r[0]', 'img_url': 'r[1]', 'shortlink': 'r[2]'}), "('sample.html', title=r[0], img_url=r[1], shortlink=r[2])\n", (778, 835), False, 'from flask import Flask, render_template, jsonify\n'), ((945, 980), 'flask.render_template', 'render_template', (['"""test.html"""'], {'re': 're'}), "('test.html', re=re)\n", (960, 980), False, 'from flask import Flask, render_template, jsonify\n'), ((1052, 1085), 'flask.render_template', 'render_template', (['"""not_found.html"""'], {}), "('not_found.html')\n", (1067, 1085), False, 'from flask import Flask, render_template, jsonify\n')] |
import pytest
from selenium import webdriver
import re
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.get("http://localhost/litecart/en/")
request.addfinalizer(wd.quit)
return wd
# check that product names are identical on the main page and on product page
def test_product_names(driver):
# get a product name on the main page
main_name = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light .name").text
# get a product name on a product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
product_name = driver.find_element_by_css_selector("#box-product .title").text
assert main_name == product_name, "Product names on the main page and on product page are NOT identical"
# check that prices (regular and campaign) are identical on the main page and on product page
def test_prices(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a regular price on the main page
main_regular_price = prices.find_element_by_css_selector(".regular-price").text
# get a campaign price on the main page
main_campaign_price = prices.find_element_by_css_selector(".campaign-price").text
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
# get a regular price on a product page
product_regular_price = driver.find_element_by_css_selector("#box-product .price-wrapper .regular-price").text
# get a campaign price on a product page
product_campaign_price = driver.find_element_by_css_selector("#box-product .price-wrapper .campaign-price").text
assert main_regular_price == product_regular_price, "Regular prices on the main page and on the product page " \
"are NOT identical"
assert main_campaign_price == product_campaign_price, "Campaign prices on the main page and on the product page " \
"are NOT identical"
# check color of regular and campaign prices and their attributes on the main page
def test_colors_main_page(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+',regular_color)
assert(color_list[0] == color_list[1] == color_list[2]), "The regular price on the main page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+',campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the main page is NOT red"
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property("text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the main page"
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the main page"
# check color of regular and campaign prices and their attributes on the product page
def test_colors_product_page(driver):
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+', regular_color)
assert (color_list[0] == color_list[1] == color_list[2]), "The regular price on the product page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+', campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the product page is NOT red"
# verify that the regular price is line-through
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property(
"text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the product page"
# verify that the campaign price is bold
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property(
"font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the product page"
# check that campaign price is bigger than regular prise on the main and product pages
def test_size_comparison(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the main page"
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the product page"
| [
"selenium.webdriver.Chrome",
"re.findall"
]
| [((102, 120), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (118, 120), False, 'from selenium import webdriver\n'), ((2691, 2724), 're.findall', 're.findall', (['"""\\\\d+"""', 'regular_color'], {}), "('\\\\d+', regular_color)\n", (2701, 2724), False, 'import re\n'), ((3086, 3120), 're.findall', 're.findall', (['"""\\\\d+"""', 'campaign_color'], {}), "('\\\\d+', campaign_color)\n", (3096, 3120), False, 'import re\n'), ((4269, 4302), 're.findall', 're.findall', (['"""\\\\d+"""', 'regular_color'], {}), "('\\\\d+', regular_color)\n", (4279, 4302), False, 'import re\n'), ((4669, 4703), 're.findall', 're.findall', (['"""\\\\d+"""', 'campaign_color'], {}), "('\\\\d+', campaign_color)\n", (4679, 4703), False, 'import re\n')] |
import shutil
from pyrite import fs
from pyrite.command_line import run_command
from pyrite.errors import UserError
from pyrite.globals import Globals
from os.path import join
class LLVMInterface:
_clang_path: str
def __init__(self):
self._clang_path = self._get_clang_path()
def _get_clang_path(self) -> str:
clang_path = shutil.which(Globals.get_compiler_options().clang_command)
if not clang_path:
raise UserError(
"Pyrite requires clang to be installed, but no such installation was found."
)
return clang_path
def compile_ll(self, source: str, output_path: str) -> None:
"""
Compile the contents of [source] as LLVM IR code, outputting a binary
specified by [output_path]. If any errors arise in compilation,
raise an error.
"""
ir_path = join(self.get_build_directory(), "build.ll")
fs.write_file(
path=ir_path,
data=source
)
result = run_command([self._clang_path, ir_path, "-o", output_path])
if result.stderr:
fs.write_file(
path=join(self.get_build_directory(), "llvm_error.txt"),
data=result.stderr
)
raise UserError(
"An unexpected error occurred during the compilation process. A detailed report has been written to {}".format(
self.get_build_directory()
)
)
def get_build_directory(self) -> str:
"""
Pyrite uses a temporary working "build" directory to store files needed for LLVM/Clang
"""
cwd = Globals.get_compiler_options().cwd
return join(cwd, "_build")
| [
"os.path.join",
"pyrite.globals.Globals.get_compiler_options",
"pyrite.fs.write_file",
"pyrite.command_line.run_command",
"pyrite.errors.UserError"
]
| [((953, 993), 'pyrite.fs.write_file', 'fs.write_file', ([], {'path': 'ir_path', 'data': 'source'}), '(path=ir_path, data=source)\n', (966, 993), False, 'from pyrite import fs\n'), ((1046, 1105), 'pyrite.command_line.run_command', 'run_command', (["[self._clang_path, ir_path, '-o', output_path]"], {}), "([self._clang_path, ir_path, '-o', output_path])\n", (1057, 1105), False, 'from pyrite.command_line import run_command\n'), ((1755, 1774), 'os.path.join', 'join', (['cwd', '"""_build"""'], {}), "(cwd, '_build')\n", (1759, 1774), False, 'from os.path import join\n'), ((459, 556), 'pyrite.errors.UserError', 'UserError', (['"""Pyrite requires clang to be installed, but no such installation was found."""'], {}), "(\n 'Pyrite requires clang to be installed, but no such installation was found.'\n )\n", (468, 556), False, 'from pyrite.errors import UserError\n'), ((1704, 1734), 'pyrite.globals.Globals.get_compiler_options', 'Globals.get_compiler_options', ([], {}), '()\n', (1732, 1734), False, 'from pyrite.globals import Globals\n'), ((367, 397), 'pyrite.globals.Globals.get_compiler_options', 'Globals.get_compiler_options', ([], {}), '()\n', (395, 397), False, 'from pyrite.globals import Globals\n')] |
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_classif, SelectKBest
import numpy as np
import pandas as pd
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-7.2.0-posix-sjlj-rt_v5-rev1\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
# Constants
FILE_PATH_TRAIN = "./input/train.h5"
FILE_PATH_TEST = "./input/test.h5"
TEST_SIZE = 0.25
# read training file
# test_data = pd.read_hdf(FILE_PATH_TRAIN, "test")
training_data = pd.read_hdf(FILE_PATH_TRAIN, "train")
# training data
# extracting the x-values
x_values_training = training_data.copy()
x_values_training = x_values_training.drop(labels=['y'], axis=1)
x_component_training = x_values_training.values
# extracting the y-values
y_component_training = training_data['y'].values
# training the scaler
scaler = StandardScaler(with_mean=True, with_std=True)
scaler = scaler.fit(x_component_training)
# scaling the training and test data
x_train_scaled = scaler.transform(x_component_training)
# feature selection
selector = SelectKBest(f_classif, k=25)
selector = selector.fit(x_train_scaled, y_component_training)
x_train_scaled_new = selector.transform(x_train_scaled)
# splitting the training set into a training & validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_scaled_new, y_component_training, test_size=TEST_SIZE, random_state=42)
# training, evaluation and test data in xgboost DMatrix
xg_train = xgb.DMatrix(x_train, label=y_train)
xg_val = xgb.DMatrix(x_val, label=y_val)
# setup parameters for xgboost
params = {}
# use softmax multi-class classification
params['objective'] = 'multi:softmax'
# scale weight of positive examples
params['silent'] = 0
params['num_class'] = 5
params['tree_method'] = 'auto'
params['seed'] = 42
# number of boosting rounds
rounds = 300
# gridsearch_params = [
# (max_depth, min_child_weight)
# for max_depth in range(6,13,2)
# for min_child_weight in range(4,9,2)
# ]
# print(gridsearch_params)
# best_params = None
# min_error = float("Inf")
# for max_depth, min_child_weight in gridsearch_params:
# print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight))
# # Update our parameters
# params['max_depth'] = max_depth
# params['min_child_weight'] = min_child_weight
# # Run CV
# cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# # Update best error
# mean_error = cv_results['test-merror-mean'].min()
# boost_rounds = cv_results['test-merror-mean'].argmin()
# print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds))
# print()
# if mean_error < min_error:
# min_error = mean_error
# best_params = (max_depth, min_child_weight)
# print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error))
# # grid search parameters
# gridsearch_params = []
# # tree depth, gamma, learning rate, regularization lambda
# for max_tree_depth in range(6, 11, 1):
# for gamma in range(0, 13, 2):
# for learn_rate in [0.3, 0.1, 0.05]:
# for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]:
# gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda))
# print(gridsearch_params)
gridsearch_params = [
(max_depth, gamma)
for max_depth in range(6,13,2)
for gamma in range(0,13,2)
]
print(gridsearch_params)
best_params = None
min_test_error = float("Inf")
min_train_error = float("Inf")
file = open("output.txt", mode="w+", encoding='utf-8', newline='\n')
for max_depth, gamma in gridsearch_params:
print("CV with max_depth={}, gamma={}".format(max_depth, gamma))
file.write("CV with max_depth={}, gamma={}\n".format(max_depth, gamma))
# Update our parameters
params['max_depth'] = max_depth
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# Update best error
test_error = cv_results['test-merror-mean'].min()
train_error = cv_results['train-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("Multiclass Error {} for {} rounds".format(test_error, boost_rounds))
print()
file.write("Multiclass Error - Test: {} - Train: {} for {} rounds\n".format(test_error, train_error, boost_rounds))
file.write("\n")
if test_error < min_test_error:
min_test_error = test_error
min_train_error = train_error
best_params = (max_depth, gamma)
print("Best params: {}, {}, Test Error: {}, Train Error: {}".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.write("Best params: {}, {}, Test Error: {}, Train Error: {}\n".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.close()
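# A possible follow-up step (an assumption, not in the original script): retrain once with the
# best (max_depth, gamma) found above before evaluating on the held-out validation split:
#
#   params['max_depth'], params['gamma'] = best_params
#   booster = xgb.train(params, xg_train, num_boost_round=rounds,
#                       evals=[(xg_val, 'val')], early_stopping_rounds=10)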
| [
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_selection.SelectKBest",
"xgboost.cv",
"xgboost.DMatrix",
"pandas.read_hdf"
]
| [((625, 662), 'pandas.read_hdf', 'pd.read_hdf', (['FILE_PATH_TRAIN', '"""train"""'], {}), "(FILE_PATH_TRAIN, 'train')\n", (636, 662), True, 'import pandas as pd\n'), ((969, 1014), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)', 'with_std': '(True)'}), '(with_mean=True, with_std=True)\n', (983, 1014), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1184, 1212), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['f_classif'], {'k': '(25)'}), '(f_classif, k=25)\n', (1195, 1212), False, 'from sklearn.feature_selection import f_classif, SelectKBest\n'), ((1427, 1528), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train_scaled_new', 'y_component_training'], {'test_size': 'TEST_SIZE', 'random_state': '(42)'}), '(x_train_scaled_new, y_component_training, test_size=\n TEST_SIZE, random_state=42)\n', (1443, 1528), False, 'from sklearn.model_selection import train_test_split\n'), ((1592, 1627), 'xgboost.DMatrix', 'xgb.DMatrix', (['x_train'], {'label': 'y_train'}), '(x_train, label=y_train)\n', (1603, 1627), True, 'import xgboost as xgb\n'), ((1637, 1668), 'xgboost.DMatrix', 'xgb.DMatrix', (['x_val'], {'label': 'y_val'}), '(x_val, label=y_val)\n', (1648, 1668), True, 'import xgboost as xgb\n'), ((4106, 4242), 'xgboost.cv', 'xgb.cv', (['params', 'xg_train'], {'num_boost_round': 'rounds', 'seed': '(42)', 'nfold': '(5)', 'metrics': "{'merror'}", 'early_stopping_rounds': '(10)', 'verbose_eval': '(True)'}), "(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics=\n {'merror'}, early_stopping_rounds=10, verbose_eval=True)\n", (4112, 4242), True, 'import xgboost as xgb\n')] |
"""
Django settings for CAutomation project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_LOGOUT_ON_GET = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
AUTH_USER_MODEL = 'cleaning.User'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
'USE_SESSION_AUTH': False,
'JSON_EDITOR': True,
}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=(#vt!5x^l3-j(e*%@p0)d_p&qd2x_#&n*^i=j38@b(26zz^mr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Application definition
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'rest_auth.registration',
'rest_auth',
'common.apps.CommonConfig',
'cleaning.apps.CleaningConfig',
]
#'corsheaders',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
#'django.middleware.common.CommonMiddleware',
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#'corsheaders.middleware.CommonMiddleware',
ROOT_URLCONF = 'CAutomation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CAutomation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='postgres://mzqgdpoeqiolgg:<EMAIL>:5432/d96ohaomhouuat'
),
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"os.path.abspath",
"dj_database_url.config",
"os.path.join",
"pathlib.Path"
]
| [((560, 601), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""staticfiles"""'], {}), "(PROJECT_ROOT, 'staticfiles')\n", (572, 601), False, 'import os\n'), ((518, 543), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (533, 543), False, 'import os\n'), ((627, 663), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""static"""'], {}), "(PROJECT_ROOT, 'static')\n", (639, 663), False, 'import os\n'), ((4045, 4137), 'dj_database_url.config', 'dj_database_url.config', ([], {'default': '"""postgres://mzqgdpoeqiolgg:<EMAIL>:5432/d96ohaomhouuat"""'}), "(default=\n 'postgres://mzqgdpoeqiolgg:<EMAIL>:5432/d96ohaomhouuat')\n", (4067, 4137), False, 'import dj_database_url\n'), ((447, 461), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (451, 461), False, 'from pathlib import Path\n')] |
"""myst-parser package setup."""
from importlib import import_module
from setuptools import find_packages, setup
setup(
name="myst-parser",
version=import_module("myst_parser").__version__,
description=(
"An extended commonmark compliant parser, " "with bridges to docutils & sphinx."
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/MyST-Parser",
project_urls={"Documentation": "https://myst-parser.readthedocs.io"},
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(),
entry_points={
"console_scripts": ["myst-benchmark = myst_parser.cli.benchmark:main"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
"Framework :: Sphinx :: Extension",
],
keywords="markdown lexer parser development docutils sphinx",
python_requires=">=3.6",
install_requires=["markdown-it-py~=0.4.5"],
extras_require={
"sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"],
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"coverage",
"pytest>=3.6,<4",
"pytest-cov",
"pytest-regressions",
"beautifulsoup4",
],
"rtd": ["sphinxcontrib-bibtex", "ipython", "sphinx-book-theme", "sphinx_tabs"],
},
zip_safe=True,
)
| [
"setuptools.find_packages",
"importlib.import_module"
]
| [((625, 640), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (638, 640), False, 'from setuptools import find_packages, setup\n'), ((158, 186), 'importlib.import_module', 'import_module', (['"""myst_parser"""'], {}), "('myst_parser')\n", (171, 186), False, 'from importlib import import_module\n')] |
from django.db import models
import uuid
from datetime import datetime
from cassandra.cqlengine import columns
from django_cassandra_engine.models import DjangoCassandraModel
# Create your models here.
class PostModel(DjangoCassandraModel):
id = columns.UUID(primary_key=True, default=uuid.uuid4)
title = columns.Text(required=True)
body = columns.Text(required=True)
    created_at = columns.DateTime(default=datetime.now)
| [
"cassandra.cqlengine.columns.DateTime",
"cassandra.cqlengine.columns.UUID",
"cassandra.cqlengine.columns.Text"
]
| [((251, 301), 'cassandra.cqlengine.columns.UUID', 'columns.UUID', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4'}), '(primary_key=True, default=uuid.uuid4)\n', (263, 301), False, 'from cassandra.cqlengine import columns\n'), ((314, 341), 'cassandra.cqlengine.columns.Text', 'columns.Text', ([], {'required': '(True)'}), '(required=True)\n', (326, 341), False, 'from cassandra.cqlengine import columns\n'), ((353, 380), 'cassandra.cqlengine.columns.Text', 'columns.Text', ([], {'required': '(True)'}), '(required=True)\n', (365, 380), False, 'from cassandra.cqlengine import columns\n'), ((398, 436), 'cassandra.cqlengine.columns.DateTime', 'columns.DateTime', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (414, 436), False, 'from cassandra.cqlengine import columns\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tkinter.messagebox
from tkinter import Button, Label, Tk
from utils.functions import set_window_center
from utils.sqlite_helper import DBHelper
from inpanel import App
class InitWindow(Tk):
"""初始化窗口"""
def __init__(self):
Tk.__init__(self)
self.title("初始化数据")
set_window_center(self, 300, 180)
self.resizable(False, False)
        self.win_success = None # popup window shown when initialization succeeds
self.init_page()
def init_page(self):
"""加载控件"""
btn_1 = Button(self, text="初始化数据库", command=self.do_init_db)
btn_1.pack(expand="yes", padx=10, pady=10, ipadx=5, ipady=5)
def do_init_db(self):
"""初始化"""
db_helper = DBHelper()
db_helper.reset_database()
db_helper.create_database()
try:
            tmp = db_helper.insert_user("admin", "admin")  # default user
tmp2 = db_helper.insert_content_by_username(
"admin",
"Hello World !",
"源码仓库地址:https://github.com/doudoudzj/tkinter-app",
"github",
)
tmp3 = db_helper.get_content_by_username("admin")
print("添加用户admin:", tmp)
print("添加内容:", tmp2)
print("查询内容:", tmp3)
self.do_success()
self.destroy()
except KeyError:
print(KeyError)
self.do_failed()
def do_failed(self):
"""是否重试"""
res = tkinter.messagebox.askretrycancel('提示', '初始化失败,是否重试?', parent=self)
if res is True:
self.do_init_db()
elif res is False:
self.destroy()
def do_success(self):
"""初始化成功弹窗"""
self.win_success = Tk()
self.win_success.title("初始化成功")
set_window_center(self.win_success, 250, 150)
self.win_success.resizable(False, False)
msg = Label(self.win_success, text="初始化成功")
msg.pack(expand="yes", fill="both")
btn = Button(self.win_success, text="确定", command=self.quit)
btn.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
btn_open_app = Button(self.win_success, text="启动程序", command=self.open_app)
btn_open_app.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
def open_app(self):
"""打开应用程序"""
self.quit()
self.win_success.destroy()
self.win_success.quit()
App()
if __name__ == "__main__":
APP_INIT = InitWindow()
APP_INIT.mainloop()
| [
"inpanel.App",
"utils.functions.set_window_center",
"tkinter.Button",
"tkinter.Tk",
"tkinter.Tk.__init__",
"tkinter.Label",
"utils.sqlite_helper.DBHelper"
]
| [((295, 312), 'tkinter.Tk.__init__', 'Tk.__init__', (['self'], {}), '(self)\n', (306, 312), False, 'from tkinter import Button, Label, Tk\n'), ((349, 382), 'utils.functions.set_window_center', 'set_window_center', (['self', '(300)', '(180)'], {}), '(self, 300, 180)\n', (366, 382), False, 'from utils.functions import set_window_center\n'), ((551, 603), 'tkinter.Button', 'Button', (['self'], {'text': '"""初始化数据库"""', 'command': 'self.do_init_db'}), "(self, text='初始化数据库', command=self.do_init_db)\n", (557, 603), False, 'from tkinter import Button, Label, Tk\n'), ((738, 748), 'utils.sqlite_helper.DBHelper', 'DBHelper', ([], {}), '()\n', (746, 748), False, 'from utils.sqlite_helper import DBHelper\n'), ((1736, 1740), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (1738, 1740), False, 'from tkinter import Button, Label, Tk\n'), ((1789, 1834), 'utils.functions.set_window_center', 'set_window_center', (['self.win_success', '(250)', '(150)'], {}), '(self.win_success, 250, 150)\n', (1806, 1834), False, 'from utils.functions import set_window_center\n'), ((1898, 1935), 'tkinter.Label', 'Label', (['self.win_success'], {'text': '"""初始化成功"""'}), "(self.win_success, text='初始化成功')\n", (1903, 1935), False, 'from tkinter import Button, Label, Tk\n'), ((1995, 2049), 'tkinter.Button', 'Button', (['self.win_success'], {'text': '"""确定"""', 'command': 'self.quit'}), "(self.win_success, text='确定', command=self.quit)\n", (2001, 2049), False, 'from tkinter import Button, Label, Tk\n'), ((2140, 2200), 'tkinter.Button', 'Button', (['self.win_success'], {'text': '"""启动程序"""', 'command': 'self.open_app'}), "(self.win_success, text='启动程序', command=self.open_app)\n", (2146, 2200), False, 'from tkinter import Button, Label, Tk\n'), ((2419, 2424), 'inpanel.App', 'App', ([], {}), '()\n', (2422, 2424), False, 'from inpanel import App\n')] |
# -*- coding=utf-8 -*-
from zwechathihu.mypdf import GenPdf
from db.mysqlite import simpleToolSql
data=[{"url": "http://mp.weixin.qq.com/s?__biz=MzAxODQxMDM0Mw==&mid=2247484852&idx=1&sn=85b50b8b0470bb4897e517955f4e5002&chksm=9bd7fbbcaca072aa75e2a241064a403fde1e579d57ab846cd8537a54253ceb2c8b93cc3bf38e&scene=21#wechat_redirect", "name": "001学习算法和刷题的框架思维"}
]
# path = '***/' || ''
# for val in data:
# # print(val["url"])
# # print(val["name"])
# pdf = GenPdf()
# title = val["name"].replace("/", "-")
# print(title)
# pdf.deal(val["url"], title, '')
# sql = simpleToolSql("url")
# # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)])
# res = sql.query("select * from wx_article;")
# print(res)
# res = sql.query("select * from wx_article where id=?;",(3,))
# print(res)
# sql.close()
# Fetch the records matching a given title from the db
def getListByTitle(title: str):
    sql = simpleToolSql("url")
    res = sql.query("select * from wx_article where title=?;", (title,))
print(res)
sql.close()
return res
# Fetch the URLs that still need to be generated from the db
def getListFromSql():
sql = simpleToolSql("url")
# res = sql.query("select * from wx_article where state=0;")
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return res
# Update the db (mark a record as processed)
def updateUrl(id:int):
sql = simpleToolSql("url")
res = sql.execute("update wx_article set state=1 where id = ?;",(id,))
    # The trailing comma is required so that (id,) is a tuple: https://blog.csdn.net/yimaoyingbi/article/details/104323701
print(res)
sql.close()
return
def addUrl():
sql = simpleToolSql("url")
sql.execute(
"insert into wx_article (url,folder,title,state,turn,create_at,update_at) values (?,?,?,?,?,?);",
[("http",'test',"01",0,1,"2020-12-03 09:38:25","2020-12-03 09:38:25")]
)
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return
# addUrl()
updateUrl(1)
res = getListFromSql()
print(res) | [
"db.mysqlite.simpleToolSql"
]
| [((918, 938), 'db.mysqlite.simpleToolSql', 'simpleToolSql', (['"""url"""'], {}), "('url')\n", (931, 938), False, 'from db.mysqlite import simpleToolSql\n'), ((1107, 1127), 'db.mysqlite.simpleToolSql', 'simpleToolSql', (['"""url"""'], {}), "('url')\n", (1120, 1127), False, 'from db.mysqlite import simpleToolSql\n'), ((1330, 1350), 'db.mysqlite.simpleToolSql', 'simpleToolSql', (['"""url"""'], {}), "('url')\n", (1343, 1350), False, 'from db.mysqlite import simpleToolSql\n'), ((1567, 1587), 'db.mysqlite.simpleToolSql', 'simpleToolSql', (['"""url"""'], {}), "('url')\n", (1580, 1587), False, 'from db.mysqlite import simpleToolSql\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from pipeline.core.flow.event import EndEvent
from pipeline.core.flow.signals import post_new_end_event_register
from pipeline.validators import rules
@receiver(post_new_end_event_register, sender=EndEvent)
def post_new_end_event_register_handler(sender, node_type, node_cls, **kwargs):
rules.NODE_RULES[node_type] = rules.SINK_RULE
rules.FLOW_NODES_WITHOUT_STARTEVENT.append(node_type)
| [
"django.dispatch.receiver",
"pipeline.validators.rules.FLOW_NODES_WITHOUT_STARTEVENT.append"
]
| [((925, 979), 'django.dispatch.receiver', 'receiver', (['post_new_end_event_register'], {'sender': 'EndEvent'}), '(post_new_end_event_register, sender=EndEvent)\n', (933, 979), False, 'from django.dispatch import receiver\n'), ((1114, 1167), 'pipeline.validators.rules.FLOW_NODES_WITHOUT_STARTEVENT.append', 'rules.FLOW_NODES_WITHOUT_STARTEVENT.append', (['node_type'], {}), '(node_type)\n', (1156, 1167), False, 'from pipeline.validators import rules\n')] |
# -*- coding: utf-8 -*-
"""
The channel model UI module
Copyright 2017-2018, <NAME> and <NAME>
SPDX-License-Identifier: MIT
"""
# pylint: disable=import-error
import os
import xbmcgui
import xbmcplugin
import resources.lib.mvutils as mvutils
from resources.lib.channel import Channel
class ChannelUI(Channel):
"""
The channel model view class
Args:
plugin(MediathekView): the plugin object
sortmethods(array, optional): an array of sort methods
for the directory representation. Default is
`[ xbmcplugin.SORT_METHOD_TITLE ]`
        nextdir(str, optional): plugin mode used when building the URLs
            of the added entries. Default is `initial`
"""
def __init__(self, plugin, sortmethods=None, nextdir='initial'):
super(ChannelUI, self).__init__()
self.plugin = plugin
self.handle = plugin.addon_handle
self.nextdir = nextdir
self.sortmethods = sortmethods if sortmethods is not None else [
xbmcplugin.SORT_METHOD_TITLE]
self.count = 0
def begin(self):
"""
Begin a directory containing channels
"""
for method in self.sortmethods:
xbmcplugin.addSortMethod(self.handle, method)
def add(self, altname=None):
"""
Add the current entry to the directory
Args:
altname(str, optional): alternative name for the entry
"""
resultingname = self.channel if self.count == 0 else '%s (%d)' % (
self.channel, self.count, )
list_item = xbmcgui.ListItem(
label=resultingname if altname is None else altname)
icon = os.path.join(
self.plugin.path,
'resources',
'icons',
self.channel.lower() + '-m.png'
)
list_item.setArt({
'thumb': icon,
'icon': icon
})
info_labels = {
'title': resultingname,
'sorttitle': resultingname.lower()
}
list_item.setInfo(type='video', infoLabels=info_labels)
xbmcplugin.addDirectoryItem(
handle=self.handle,
url=mvutils.build_url({
'mode': self.nextdir,
'channel': self.channelid
}),
listitem=list_item,
isFolder=True
)
def end(self):
""" Finish a directory containing channels """
xbmcplugin.endOfDirectory(self.handle)
| [
"xbmcplugin.endOfDirectory",
"xbmcgui.ListItem",
"xbmcplugin.addSortMethod",
"resources.lib.mvutils.build_url"
]
| [((1490, 1559), 'xbmcgui.ListItem', 'xbmcgui.ListItem', ([], {'label': '(resultingname if altname is None else altname)'}), '(label=resultingname if altname is None else altname)\n', (1506, 1559), False, 'import xbmcgui\n'), ((2357, 2395), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['self.handle'], {}), '(self.handle)\n', (2382, 2395), False, 'import xbmcplugin\n'), ((1122, 1167), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['self.handle', 'method'], {}), '(self.handle, method)\n', (1146, 1167), False, 'import xbmcplugin\n'), ((2090, 2158), 'resources.lib.mvutils.build_url', 'mvutils.build_url', (["{'mode': self.nextdir, 'channel': self.channelid}"], {}), "({'mode': self.nextdir, 'channel': self.channelid})\n", (2107, 2158), True, 'import resources.lib.mvutils as mvutils\n')] |
#!/usr/bin/env python3
#Credit to @Alright for the RPCs
import re
import os
import requests
import json
import platform
# define function that fetches rpc creds from .conf
def def_credentials(chain):
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
    elif operating_system == 'Windows':  # platform.system() reports 'Windows' on Windows
ac_dir = "dont have windows machine now to test"
# define config file path
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
#define rpc creds
with open(coin_config_file, 'r') as f:
#print("Reading config file for credentials:", coin_config_file)
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
return('http://' + rpcuser + ':' + rpcpassword + '@127.0.0.1:' + rpcport)
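# Illustrative result only (actual values come from the local .conf file):
#   def_credentials('KMD')  ->  'http://rpcuser:rpcpassword@127.0.0.1:7771'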
# define function that posts json data
def post_rpc(url, payload, auth=None):
try:
r = requests.post(url, data=json.dumps(payload), auth=auth)
return(json.loads(r.text))
except Exception as e:
raise Exception("Couldn't connect to " + url + ": ", e)
# Return current -pubkey=
def getpubkey_rpc(chain):
getinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getinfo",
"params": []}
getinfo_result = post_rpc(def_credentials(chain), getinfo_payload)
return(getinfo_result['result']['pubkey'])
# return latest batontxid from all publishers
def get_latest_batontxids(chain, oracletxid):
oraclesinfo_result = oraclesinfo_rpc(chain, oracletxid)
latest_batontxids = {}
# fill "latest_batontxids" dictionary with publisher:batontxid data
for i in oraclesinfo_result['registered']:
latest_batontxids[i['publisher']] = i['batontxid']
return(latest_batontxids)
#VANILLA RPC
def sendrawtx_rpc(chain, rawtx):
sendrawtx_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "sendrawtransaction",
"params": [rawtx]}
#rpcurl = def_credentials(chain)
return(post_rpc(def_credentials(chain), sendrawtx_payload))
def signmessage_rpc(chain, address, message):
signmessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "signmessage",
"params": [
address,
message
]
}
signmessage_result = post_rpc(def_credentials(chain), signmessage_payload)
return(signmessage_result['result'])
def verifymessage_rpc(chain, address, signature, message):
verifymessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "verifymessage",
"params": [
address,
signature,
message
]
}
verifymessage_result = post_rpc(def_credentials(chain), verifymessage_payload)
return(verifymessage_result['result'])
def kvsearch_rpc(chain, key):
kvsearch_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvsearch",
"params": [
key
]
}
kvsearch_result = post_rpc(def_credentials(chain), kvsearch_payload)
return(kvsearch_result['result'])
def kvupdate_rpc(chain, key, value, days, password):
# create dynamic oraclessamples payload
kvupdate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvupdate",
"params": [
key,
value,
str(days),
password]}
# make kvupdate rpc call
kvupdate_result = post_rpc(def_credentials(chain), kvupdate_payload)
return(kvupdate_result)
def oraclesdata_rpc(chain, oracletxid, hexstr):
oraclesdata_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesdata",
"params": [
oracletxid,
hexstr]}
oraclesdata_result = post_rpc(def_credentials(chain), oraclesdata_payload)
return(oraclesdata_result['result'])
def oraclescreate_rpc(chain, name, description, oracle_type):
oraclescreate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclescreate",
"params": [
name,
description,
oracle_type]}
oraclescreate_result = post_rpc(def_credentials(chain), oraclescreate_payload)
return(oraclescreate_result['result'])
def oraclesinfo_rpc(chain, oracletxid):
oraclesinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesinfo",
"params": [oracletxid]}
oraclesinfo_result = post_rpc(def_credentials(chain), oraclesinfo_payload)
return(oraclesinfo_result['result'])
def oracleslist_rpc(chain):
oracleslist_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oracleslist",
"params": []}
oracleslist_result = post_rpc(def_credentials(chain), oracleslist_payload)
return(oracleslist_result['result'])
def oraclessubscribe_rpc(chain, oracletxid, publisher, amount):
oraclessubscribe_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessubscribe",
"params": [oracletxid, publisher, amount]}
oraclessubscribe_result = post_rpc(def_credentials(chain), oraclessubscribe_payload)
return(oraclessubscribe_result['result'])
def oraclesregister_rpc(chain, oracletxid, datafee):
oraclesregister_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesregister",
"params": [
oracletxid,
str(datafee)]}
oraclesregister_result = post_rpc(def_credentials(chain), oraclesregister_payload)
return(oraclesregister_result['result'])
def oraclessamples_rpc(chain, oracletxid, batonutxo, num):
oraclessamples_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessamples",
"params": [
oracletxid,
batonutxo,
str(num)]}
oraclessamples_result = post_rpc(def_credentials(chain), oraclessamples_payload)
return(oraclessamples_result['result'])
def getlastsegidstakes_rpc(chain, depth):
oraclessubscribe_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessubscribe",
"params": [depth]}
getlastsegidstakes_result = post_rpc(def_credentials(chain), oraclessubscribe_payload)
return(getlastsegidstakes_result['result'])
| [
"platform.system",
"json.dumps",
"json.loads",
"re.search"
]
| [((225, 242), 'platform.system', 'platform.system', ([], {}), '()\n', (240, 242), False, 'import platform\n'), ((1466, 1484), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1476, 1484), False, 'import json\n'), ((940, 963), 're.search', 're.search', (['"""rpcuser"""', 'l'], {}), "('rpcuser', l)\n", (949, 963), False, 'import re\n'), ((1034, 1061), 're.search', 're.search', (['"""rpcpassword"""', 'l'], {}), "('rpcpassword', l)\n", (1043, 1061), False, 'import re\n'), ((1419, 1438), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1429, 1438), False, 'import json\n'), ((1140, 1163), 're.search', 're.search', (['"""rpcport"""', 'l'], {}), "('rpcport', l)\n", (1149, 1163), False, 'import re\n')] |
# -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes, and all of them expect a
dictionary having at least one key for each field, having the same name as the
field, which will refer to a valid value.
As said, the values in the dictionary should already be valid: if an integer
is expected, the dictionary should contain an integer. The values contained
in the dictionary entries should not need any further parsing.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class TransactionRecordDictionaryDecoder(Decoder):
def __init__(self):
super(TransactionRecordDictionaryDecoder, self).__init__()
self._decoders = {}
self._decoders['ACK'] = AcknowledgementDictionaryDecoder()
self._decoders['AGR'] = AgreementDictionaryDecoder()
self._decoders['TER'] = AgreementTerritoryDictionaryDecoder()
self._decoders['ARI'] = AdditionalRelatedInformationDictionaryDecoder()
self._decoders['ALT'] = AlternateTitleDictionaryDecoder()
self._decoders['EWT'] = AuthoredWorkDictionaryDecoder()
self._decoders['VER'] = AuthoredWorkDictionaryDecoder()
self._decoders['COM'] = ComponentDictionaryDecoder()
self._decoders['IPA'] = InterestedPartyForAgreementDictionaryDecoder()
self._decoders['SPT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['SWT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['IND'] = InstrumentationDetailDictionaryDecoder()
self._decoders['INS'] = InstrumentationSummaryDictionaryDecoder()
self._decoders['MSG'] = MessageDictionaryDecoder()
self._decoders['PER'] = PerformingArtistDictionaryDecoder()
self._decoders['PWR'] = PublisherForWriterDictionaryDecoder()
self._decoders['REC'] = RecordingDetailDictionaryDecoder()
self._decoders['EXC'] = WorkDictionaryDecoder()
self._decoders['ISW'] = WorkDictionaryDecoder()
self._decoders['NWR'] = WorkDictionaryDecoder()
self._decoders['REV'] = WorkDictionaryDecoder()
self._decoders['ORN'] = WorkOriginDictionaryDecoder()
self._decoders['SWR'] = WriterRecordDictionaryDecoder()
self._decoders['OWR'] = WriterRecordDictionaryDecoder()
self._decoders[
'NPA'] = NonRomanAlphabetAgreementPartyDictionaryDecoder()
self._decoders['NOW'] = NonRomanAlphabetOtherWriterDictionaryDecoder()
self._decoders[
'NPR'] = NonRomanAlphabetPerformanceDataDictionaryDecoder()
self._decoders['NPN'] = NonRomanAlphabetPublisherNameDictionaryDecoder()
self._decoders['NAT'] = NonRomanAlphabetTitleDictionaryDecoder()
self._decoders['NET'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NCT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NVT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NWN'] = NonRomanAlphabetWriterNameDictionaryDecoder()
self._decoders['SPU'] = PublisherRecordDictionaryDecoder()
self._decoders['OPU'] = PublisherRecordDictionaryDecoder()
def decode(self, data):
return self._decoders[data['record_type']].decode(data)
class AcknowledgementDictionaryDecoder(Decoder):
def __init__(self):
super(AcknowledgementDictionaryDecoder, self).__init__()
def decode(self, data):
return AcknowledgementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
original_group_id=data[
'original_group_id'],
original_transaction_sequence_n=data[
'original_transaction_sequence_n'],
original_transaction_type=data[
'original_transaction_type'],
transaction_status=data[
'transaction_status'],
creation_date_time=data[
'creation_date_time'],
processing_date=data['processing_date'],
creation_title=data['creation_title'],
submitter_creation_n=data[
'submitter_creation_n'],
recipient_creation_n=data[
'recipient_creation_n'])
class AgreementDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
agreement_type=data['agreement_type'],
agreement_start_date=data[
'agreement_start_date'],
prior_royalty_status=data[
'prior_royalty_status'],
post_term_collection_status=data[
'post_term_collection_status'],
number_of_works=data['number_of_works'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'],
international_standard_code=data[
'international_standard_code'],
sales_manufacture_clause=data[
'sales_manufacture_clause'],
agreement_end_date=data['agreement_end_date'],
date_of_signature=data['date_of_signature'],
retention_end_date=data['retention_end_date'],
prior_royalty_start_date=data[
'prior_royalty_start_date'],
post_term_collection_end_date=data[
'post_term_collection_end_date'],
shares_change=data['shares_change'],
advance_given=data['advance_given'])
class AgreementTerritoryDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementTerritoryDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementTerritoryRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
tis_numeric_code=data[
'tis_numeric_code'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'])
class AdditionalRelatedInformationDictionaryDecoder(Decoder):
def __init__(self):
super(AdditionalRelatedInformationDictionaryDecoder, self).__init__()
def decode(self, data):
return AdditionalRelatedInfoRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
society_n=data['society_n'],
type_of_right=data['type_of_right'],
work_n=data['work_n'],
subject_code=data['subject_code'],
note=data['note'])
class AlternateTitleDictionaryDecoder(Decoder):
def __init__(self):
super(AlternateTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return AlternateTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
alternate_title=data['alternate_title'],
title_type=data['title_type'],
language_code=data['language_code'])
class AuthoredWorkDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(AuthoredWorkDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data[
'writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data[
'writer_2_ipi_base_n'])
return AuthoredWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_first_name=data[
'writer_1_first_name'],
writer_1_last_name=data['writer_1_last_name'],
writer_2_first_name=data[
'writer_2_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data[
'writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data[
'writer_2_ipi_name_n'],
source=data['source'],
language_code=data['language_code'],
iswc=data['iswc'])
class ComponentDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(ComponentDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data['writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data['writer_2_ipi_base_n'])
return ComponentRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_last_name=data['writer_1_last_name'],
writer_1_first_name=data['writer_1_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_2_first_name=data['writer_2_first_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data['writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data['writer_2_ipi_name_n'],
iswc=data['iswc'],
duration=data['duration'])
class GroupHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(GroupHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
return GroupHeader(record_type=data['record_type'],
group_id=data['group_id'],
transaction_type=data['transaction_type'],
version_number=data['version_number'],
batch_request_id=data['batch_request_id'])
class GroupTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(GroupTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
total_monetary_value = None
if 'total_monetary_value' in data:
total_monetary_value = data['total_monetary_value']
currency_indicator = None
if 'currency_indicator' in data:
currency_indicator = data['currency_indicator']
return GroupTrailer(record_type=data['record_type'],
group_id=data['group_id'],
transaction_count=data['transaction_count'],
record_count=data['record_count'],
currency_indicator=currency_indicator,
total_monetary_value=total_monetary_value,
)
class InterestedPartyForAgreementDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(InterestedPartyForAgreementDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
return InterestedPartyForAgreementRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_n=data['ip_n'],
ip_last_name=data['ip_last_name'],
agreement_role_code=data['agreement_role_code'],
ip_writer_first_name=data['ip_writer_first_name'],
ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base,
pr_society=data['pr_society'], pr_share=data['pr_share'],
mr_society=data['mr_society'], mr_share=data['mr_share'],
sr_society=data['sr_society'], sr_share=data['sr_share'])
class IPTerritoryOfControlDictionaryDecoder(Decoder):
def __init__(self):
super(IPTerritoryOfControlDictionaryDecoder, self).__init__()
def decode(self, data):
record = IPTerritoryOfControlRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
ip_n=data['ip_n'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'],
tis_numeric_code=data[
'tis_numeric_code'],
sequence_n=data['sequence_n'],
pr_collection_share=data[
'pr_collection_share'],
mr_collection_share=data[
'mr_collection_share'],
shares_change=data['shares_change'])
if 'sr_collection_share' in data:
record.sr_collection_share = data['sr_collection_share']
return record
class InstrumentationDetailDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationDetailDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
instrument_code=data[
'instrument_code'],
number_players=data[
'number_players'])
class InstrumentationSummaryDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationSummaryDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationSummaryRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
number_voices=data['number_voices'],
standard_instrumentation_type=data['standard_instrumentation_type'],
instrumentation_description=data['instrumentation_description'])
class MessageDictionaryDecoder(Decoder):
def __init__(self):
super(MessageDictionaryDecoder, self).__init__()
def decode(self, data):
return MessageRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
message_type=data['message_type'],
message_text=data['message_text'],
original_record_sequence_n=data[
'original_record_sequence_n'],
message_record_type=data['message_record_type'],
message_level=data['message_level'],
validation_n=data['validation_n'])
class PerformingArtistDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PerformingArtistDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = None
if 'performing_artist_ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['performing_artist_ipi_base_n'])
performing_artist_first_name = None
if 'performing_artist_first_name' in data:
performing_artist_first_name = data['performing_artist_first_name']
performing_artist_ipi_name_n = None
if 'performing_artist_ipi_name_n' in data:
performing_artist_ipi_name_n = data['performing_artist_ipi_name_n']
return PerformingArtistRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
performing_artist_last_name=data[
'performing_artist_last_name'],
performing_artist_first_name=performing_artist_first_name,
performing_artist_ipi_name_n=performing_artist_ipi_name_n,
performing_artist_ipi_base_n=ipi_base)
class PublisherForWriterDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherForWriterDictionaryDecoder, self).__init__()
def decode(self, data):
publisher_name = None
if 'publisher_name' in data:
publisher_name = data['publisher_name']
return PublisherForWriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
publisher_ip_n=data['publisher_ip_n'],
publisher_name=publisher_name,
writer_ip_n=data['writer_ip_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'])
class RecordingDetailDictionaryDecoder(Decoder):
def __init__(self):
super(RecordingDetailDictionaryDecoder, self).__init__()
def decode(self, data):
media_type = None
if 'media_type' in data:
media_type = data['media_type']
return RecordingDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
first_release_date=data[
'first_release_date'],
first_release_duration=data[
'first_release_duration'],
first_album_title=data[
'first_album_title'],
first_album_label=data[
'first_album_label'],
first_release_catalog_n=data[
'first_release_catalog_n'],
ean=data['ean'],
isrc=data['isrc'],
recording_format=data['recording_format'],
recording_technique=data[
'recording_technique'],
media_type=media_type)
class FileDictionaryDecoder(Decoder):
def __init__(self):
super(FileDictionaryDecoder, self).__init__()
self._tag_decoder = FileTagDictionaryDecoder()
self._transmission_decoder = TransmissionDictionaryDecoder()
def decode(self, data):
tag = data['tag']
if isinstance(tag, dict):
tag = self._tag_decoder.decode(tag)
transmission = data['transmission']
if isinstance(transmission, dict):
transmission = self._transmission_decoder.decode(transmission)
return CWRFile(tag, transmission)
class TransmissionDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionDictionaryDecoder, self).__init__()
self._header_decoder = TransmissionHeaderDictionaryDecoder()
self._trailer_decoder = TransmissionTrailerDictionaryDecoder()
self._group_decoder = GroupDictionaryDecoder()
def decode(self, data):
header = data['header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
groups = []
if len(data['groups']) > 0:
if isinstance(data['groups'][0], dict):
for group in data['groups']:
groups.append(self._group_decoder.decode(group))
else:
groups = data['groups']
return Transmission(header, trailer, groups)
class GroupDictionaryDecoder(Decoder):
def __init__(self):
super(GroupDictionaryDecoder, self).__init__()
self._header_decoder = GroupHeaderDictionaryDecoder()
self._trailer_decoder = GroupTrailerDictionaryDecoder()
self._transaction_decoder = TransactionRecordDictionaryDecoder()
def decode(self, data):
header = data['group_header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['group_trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
transactions = []
if len(data['transactions']) > 0:
if isinstance(data['transactions'][0][0], dict):
for transaction in data['transactions']:
transaction_records = []
for record in transaction:
transaction_records.append(
self._transaction_decoder.decode(record))
transactions.append(transaction_records)
else:
transactions = data['transactions']
return Group(header, trailer, transactions)
class TransmissionHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
header = TransmissionHeader(record_type=data['record_type'],
sender_id=data['sender_id'],
sender_name=data['sender_name'],
sender_type=data['sender_type'],
creation_date_time=data[
'creation_date_time'],
transmission_date=data['transmission_date'],
edi_standard=data['edi_standard'])
if 'character_set' in data:
header.character_set = data['character_set']
return header
class TransmissionTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
return TransmissionTrailer(record_type=data['record_type'],
group_count=data['group_count'],
transaction_count=data['transaction_count'],
record_count=data['record_count'])
class WorkDictionaryDecoder(Decoder):
def __init__(self):
super(WorkDictionaryDecoder, self).__init__()
def decode(self, data):
catalogue_number = None
if 'catalogue_number' in data:
catalogue_number = data['catalogue_number']
exceptional_clause = None
if 'exceptional_clause' in data:
exceptional_clause = data['exceptional_clause']
opus_number = None
if 'opus_number' in data:
opus_number = data['opus_number']
priority_flag = None
if 'priority_flag' in data:
priority_flag = data['priority_flag']
return WorkRecord(record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_work_n=data['submitter_work_n'],
title=data['title'],
version_type=data['version_type'],
musical_work_distribution_category=data[
'musical_work_distribution_category'],
date_publication_printed_edition=data[
'date_publication_printed_edition'],
text_music_relationship=data[
'text_music_relationship'],
language_code=data['language_code'],
copyright_number=data['copyright_number'],
copyright_date=data['copyright_date'],
music_arrangement=data['music_arrangement'],
lyric_adaptation=data['lyric_adaptation'],
excerpt_type=data['excerpt_type'],
composite_type=data['composite_type'],
composite_component_count=data[
'composite_component_count'],
iswc=data['iswc'],
work_type=data['work_type'],
duration=data['duration'],
catalogue_number=catalogue_number,
opus_number=opus_number,
contact_id=data['contact_id'],
contact_name=data['contact_name'],
recorded_indicator=data['recorded_indicator'],
priority_flag=priority_flag,
exceptional_clause=exceptional_clause,
grand_rights_indicator=data['grand_rights_indicator'])
class WorkOriginDictionaryDecoder(Decoder):
def __init__(self):
super(WorkOriginDictionaryDecoder, self).__init__()
def decode(self, data):
return WorkOriginRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
intended_purpose=data['intended_purpose'],
production_title=data['production_title'],
cd_identifier=data['cd_identifier'],
cut_number=data['cut_number'],
library=data['library'],
bltvr=data['bltvr'],
visan=data['visan'],
production_n=data['production_n'],
episode_title=data['episode_title'],
episode_n=data['episode_n'],
year_production=data['year_production'],
audio_visual_key=data['audio_visual_key'])
class WriterDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(WriterDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_n = self._ipi_base_decoder.decode(data['ipi_base_n'])
return Writer(ip_n=data['ip_n'],
personal_number=data['personal_number'],
ipi_base_n=ipi_base_n,
writer_first_name=data['writer_first_name'],
writer_last_name=data['writer_last_name'],
tax_id=data['tax_id'],
ipi_name_n=data['ipi_name_n'])
class WriterRecordDictionaryDecoder(Decoder):
def __init__(self):
super(WriterRecordDictionaryDecoder, self).__init__()
self._writer_decoder = WriterDictionaryDecoder()
def decode(self, data):
writer = self._writer_decoder.decode(data['writer'])
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
return WriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer=writer,
writer_designation=data['writer_designation'],
work_for_hire=data['work_for_hire'],
writer_unknown=data['writer_unknown'],
reversionary=data['reversionary'],
first_recording_refusal=data[
'first_recording_refusal'],
usa_license=usa_license,
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'])
class NonRomanAlphabetAgreementPartyDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetAgreementPartyDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetAgreementPartyRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_name=data['ip_name'],
ip_writer_name=data['ip_writer_name'],
ip_n=data['ip_n'],
language_code=data['language_code'])
class NonRomanAlphabetOtherWriterDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetOtherWriterDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetOtherWriterRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer_first_name=data['writer_first_name'],
writer_name=data['writer_name'],
position=data['position'],
language_code=data['language_code'])
class NonRomanAlphabetPerformanceDataDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(NonRomanAlphabetPerformanceDataDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(
data['performing_artist_ipi_base_n'])
return NonRomanAlphabetPerformanceDataRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
performing_artist_first_name=data['performing_artist_first_name'],
performing_artist_name=data['performing_artist_name'],
performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],
performing_artist_ipi_base_n=ipi_base,
language_code=data['language_code'],
performance_language=data['performance_language'],
performance_dialect=data['performance_dialect'])
class NonRomanAlphabetPublisherNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetPublisherNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetPublisherNameRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher_sequence_n=data['publisher_sequence_n'],
ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
language_code=data['language_code'])
class NonRomanAlphabetTitleDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
title_type=data['title_type'],
language_code=data['language_code'])
class NonRomanAlphabetWorkDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWorkDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
language_code=data['language_code'])
class NonRomanAlphabetWriterNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWriterNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWriterNameRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
writer_first_name=data[
'writer_first_name'],
writer_last_name=data[
'writer_last_name'],
ip_n=data['ip_n'],
language_code=data[
'language_code'])
class PublisherDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PublisherDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
if 'ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
else:
ipi_base = None
return Publisher(ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
ipi_name_n=data['ipi_name_n'],
ipi_base_n=ipi_base,
tax_id=data['tax_id'])
class PublisherRecordDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherRecordDictionaryDecoder, self).__init__()
self._publisher_decoder = PublisherDictionaryDecoder()
def decode(self, data):
publisher = self._publisher_decoder.decode(data['publisher'])
special_agreements = None
if 'special_agreements' in data:
special_agreements = data['special_agreements']
first_recording_refusal = None
if 'first_recording_refusal' in data:
first_recording_refusal = data['first_recording_refusal']
agreement_type = None
if 'agreement_type' in data:
agreement_type = data['agreement_type']
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
international_standard_code = None
if 'international_standard_code' in data:
international_standard_code = data['international_standard_code']
society_assigned_agreement_n = None
if 'society_assigned_agreement_n' in data:
society_assigned_agreement_n = data['society_assigned_agreement_n']
return PublisherRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher=publisher,
publisher_sequence_n=data['publisher_sequence_n'],
submitter_agreement_n=data['submitter_agreement_n'],
publisher_type=data['publisher_type'],
publisher_unknown=data['publisher_unknown'],
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'],
special_agreements=special_agreements,
first_recording_refusal=first_recording_refusal,
international_standard_code=international_standard_code,
society_assigned_agreement_n=society_assigned_agreement_n,
agreement_type=agreement_type,
usa_license=usa_license)
class TableValueDictionaryDecoder(Decoder):
def __init__(self):
super(TableValueDictionaryDecoder, self).__init__()
def decode(self, data):
return TableValue(code=data['code'],
name=data['name'],
description=data['description'])
class MediaTypeValueDictionaryDecoder(Decoder):
def __init__(self):
super(MediaTypeValueDictionaryDecoder, self).__init__()
def decode(self, data):
return MediaTypeValue(code=data['code'],
name=data['name'],
media_type=data['media_type'],
duration_max=data['duration_max'],
works_max=data['works_max'],
fragments_max=data['fragments_max'])
class InstrumentValueDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentValueDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentValue(code=data['code'],
name=data['name'],
family=data['family'],
description=data['description'])
class FileTagDictionaryDecoder(Decoder):
def __init__(self):
super(FileTagDictionaryDecoder, self).__init__()
def decode(self, data):
return FileTag(data['year'],
data['sequence_n'],
data['sender'],
data['receiver'],
data['version'])
class AVIKeyDictionaryDecoder(Decoder):
def __init__(self):
super(AVIKeyDictionaryDecoder, self).__init__()
def decode(self, data):
return AVIKey(data['society_code'],
data['av_number'])
class IPIBaseDictionaryDecoder(Decoder):
def __init__(self):
super(IPIBaseDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class ISWCDictionaryDecoder(Decoder):
def __init__(self):
super(ISWCDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class VISANDictionaryDecoder(Decoder):
def __init__(self):
super(VISANDictionaryDecoder, self).__init__()
def decode(self, data):
return data
| [
"cwr.table_value.MediaTypeValue",
"cwr.group.GroupTrailer",
"cwr.other.AVIKey",
"cwr.table_value.TableValue",
"cwr.agreement.InterestedPartyForAgreementRecord",
"cwr.work.AuthoredWorkRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetPerformanceDataRecord",
"cwr.group.GroupHeader",
"cwr.work.InstrumentationSummaryRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetPublisherNameRecord",
"cwr.interested_party.Writer",
"cwr.work.WorkOriginRecord",
"cwr.acknowledgement.MessageRecord",
"cwr.interested_party.IPTerritoryOfControlRecord",
"cwr.file.FileTag",
"cwr.interested_party.PublisherRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetWriterNameRecord",
"cwr.table_value.InstrumentValue",
"cwr.work.AlternateTitleRecord",
"cwr.interested_party.PublisherForWriterRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetWorkRecord",
"cwr.work.InstrumentationDetailRecord",
"cwr.work.RecordingDetailRecord",
"cwr.info.AdditionalRelatedInfoRecord",
"cwr.agreement.AgreementRecord",
"cwr.transmission.TransmissionTrailer",
"cwr.non_roman_alphabet.NonRomanAlphabetOtherWriterRecord",
"cwr.group.Group",
"cwr.transmission.TransmissionHeader",
"cwr.interested_party.Publisher",
"cwr.agreement.AgreementTerritoryRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetTitleRecord",
"cwr.interested_party.WriterRecord",
"cwr.work.ComponentRecord",
"cwr.non_roman_alphabet.NonRomanAlphabetAgreementPartyRecord",
"cwr.file.CWRFile",
"cwr.transmission.Transmission",
"cwr.acknowledgement.AcknowledgementRecord",
"cwr.work.PerformingArtistRecord",
"cwr.work.WorkRecord"
]
| [((4848, 5498), 'cwr.acknowledgement.AcknowledgementRecord', 'AcknowledgementRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'original_group_id': "data['original_group_id']", 'original_transaction_sequence_n': "data['original_transaction_sequence_n']", 'original_transaction_type': "data['original_transaction_type']", 'transaction_status': "data['transaction_status']", 'creation_date_time': "data['creation_date_time']", 'processing_date': "data['processing_date']", 'creation_title': "data['creation_title']", 'submitter_creation_n': "data['submitter_creation_n']", 'recipient_creation_n': "data['recipient_creation_n']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], original_group_id=data[\n 'original_group_id'], original_transaction_sequence_n=data[\n 'original_transaction_sequence_n'], original_transaction_type=data[\n 'original_transaction_type'], transaction_status=data[\n 'transaction_status'], creation_date_time=data['creation_date_time'],\n processing_date=data['processing_date'], creation_title=data[\n 'creation_title'], submitter_creation_n=data['submitter_creation_n'],\n recipient_creation_n=data['recipient_creation_n'])\n", (4869, 5498), False, 'from cwr.acknowledgement import AcknowledgementRecord, MessageRecord\n'), ((6415, 7457), 'cwr.agreement.AgreementRecord', 'AgreementRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'submitter_agreement_n': "data['submitter_agreement_n']", 'agreement_type': "data['agreement_type']", 'agreement_start_date': "data['agreement_start_date']", 'prior_royalty_status': "data['prior_royalty_status']", 'post_term_collection_status': "data['post_term_collection_status']", 'number_of_works': "data['number_of_works']", 'society_assigned_agreement_n': "data['society_assigned_agreement_n']", 'international_standard_code': "data['international_standard_code']", 'sales_manufacture_clause': "data['sales_manufacture_clause']", 'agreement_end_date': "data['agreement_end_date']", 'date_of_signature': "data['date_of_signature']", 'retention_end_date': "data['retention_end_date']", 'prior_royalty_start_date': "data['prior_royalty_start_date']", 'post_term_collection_end_date': "data['post_term_collection_end_date']", 'shares_change': "data['shares_change']", 'advance_given': "data['advance_given']"}), "(record_type=data['record_type'], transaction_sequence_n=\n data['transaction_sequence_n'], record_sequence_n=data[\n 'record_sequence_n'], submitter_agreement_n=data[\n 'submitter_agreement_n'], agreement_type=data['agreement_type'],\n agreement_start_date=data['agreement_start_date'], prior_royalty_status\n =data['prior_royalty_status'], post_term_collection_status=data[\n 'post_term_collection_status'], number_of_works=data['number_of_works'],\n society_assigned_agreement_n=data['society_assigned_agreement_n'],\n international_standard_code=data['international_standard_code'],\n sales_manufacture_clause=data['sales_manufacture_clause'],\n agreement_end_date=data['agreement_end_date'], date_of_signature=data[\n 'date_of_signature'], retention_end_date=data['retention_end_date'],\n prior_royalty_start_date=data['prior_royalty_start_date'],\n post_term_collection_end_date=data['post_term_collection_end_date'],\n 
shares_change=data['shares_change'], advance_given=data['advance_given'])\n", (6430, 7457), False, 'from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, InterestedPartyForAgreementRecord\n'), ((8504, 8791), 'cwr.agreement.AgreementTerritoryRecord', 'AgreementTerritoryRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'tis_numeric_code': "data['tis_numeric_code']", 'inclusion_exclusion_indicator': "data['inclusion_exclusion_indicator']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], tis_numeric_code=data[\n 'tis_numeric_code'], inclusion_exclusion_indicator=data[\n 'inclusion_exclusion_indicator'])\n", (8528, 8791), False, 'from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, InterestedPartyForAgreementRecord\n'), ((9324, 9644), 'cwr.info.AdditionalRelatedInfoRecord', 'AdditionalRelatedInfoRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'society_n': "data['society_n']", 'type_of_right': "data['type_of_right']", 'work_n': "data['work_n']", 'subject_code': "data['subject_code']", 'note': "data['note']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], society_n=data['society_n'\n ], type_of_right=data['type_of_right'], work_n=data['work_n'],\n subject_code=data['subject_code'], note=data['note'])\n", (9351, 9644), False, 'from cwr.info import AdditionalRelatedInfoRecord\n'), ((10207, 10487), 'cwr.work.AlternateTitleRecord', 'AlternateTitleRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'alternate_title': "data['alternate_title']", 'title_type': "data['title_type']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], alternate_title=data[\n 'alternate_title'], title_type=data['title_type'], language_code=data[\n 'language_code'])\n", (10227, 10487), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((11327, 12016), 'cwr.work.AuthoredWorkRecord', 'AuthoredWorkRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'title': "data['title']", 'submitter_work_n': "data['submitter_work_n']", 'writer_1_first_name': "data['writer_1_first_name']", 'writer_1_last_name': "data['writer_1_last_name']", 'writer_2_first_name': "data['writer_2_first_name']", 'writer_2_last_name': "data['writer_2_last_name']", 'writer_1_ipi_base_n': 'ipi_base_1', 'writer_1_ipi_name_n': "data['writer_1_ipi_name_n']", 'writer_2_ipi_base_n': 'ipi_base_2', 'writer_2_ipi_name_n': "data['writer_2_ipi_name_n']", 'source': "data['source']", 'language_code': "data['language_code']", 'iswc': "data['iswc']"}), "(record_type=data['record_type'], transaction_sequence_n=\n data['transaction_sequence_n'], record_sequence_n=data[\n 'record_sequence_n'], title=data['title'], 
submitter_work_n=data[\n 'submitter_work_n'], writer_1_first_name=data['writer_1_first_name'],\n writer_1_last_name=data['writer_1_last_name'], writer_2_first_name=data\n ['writer_2_first_name'], writer_2_last_name=data['writer_2_last_name'],\n writer_1_ipi_base_n=ipi_base_1, writer_1_ipi_name_n=data[\n 'writer_1_ipi_name_n'], writer_2_ipi_base_n=ipi_base_2,\n writer_2_ipi_name_n=data['writer_2_ipi_name_n'], source=data['source'],\n language_code=data['language_code'], iswc=data['iswc'])\n", (11345, 12016), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((13199, 13854), 'cwr.work.ComponentRecord', 'ComponentRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'title': "data['title']", 'submitter_work_n': "data['submitter_work_n']", 'writer_1_last_name': "data['writer_1_last_name']", 'writer_1_first_name': "data['writer_1_first_name']", 'writer_2_last_name': "data['writer_2_last_name']", 'writer_2_first_name': "data['writer_2_first_name']", 'writer_1_ipi_base_n': 'ipi_base_1', 'writer_1_ipi_name_n': "data['writer_1_ipi_name_n']", 'writer_2_ipi_base_n': 'ipi_base_2', 'writer_2_ipi_name_n': "data['writer_2_ipi_name_n']", 'iswc': "data['iswc']", 'duration': "data['duration']"}), "(record_type=data['record_type'], transaction_sequence_n=\n data['transaction_sequence_n'], record_sequence_n=data[\n 'record_sequence_n'], title=data['title'], submitter_work_n=data[\n 'submitter_work_n'], writer_1_last_name=data['writer_1_last_name'],\n writer_1_first_name=data['writer_1_first_name'], writer_2_last_name=\n data['writer_2_last_name'], writer_2_first_name=data[\n 'writer_2_first_name'], writer_1_ipi_base_n=ipi_base_1,\n writer_1_ipi_name_n=data['writer_1_ipi_name_n'], writer_2_ipi_base_n=\n ipi_base_2, writer_2_ipi_name_n=data['writer_2_ipi_name_n'], iswc=data[\n 'iswc'], duration=data['duration'])\n", (13214, 13854), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((14458, 14663), 'cwr.group.GroupHeader', 'GroupHeader', ([], {'record_type': "data['record_type']", 'group_id': "data['group_id']", 'transaction_type': "data['transaction_type']", 'version_number': "data['version_number']", 'batch_request_id': "data['batch_request_id']"}), "(record_type=data['record_type'], group_id=data['group_id'],\n transaction_type=data['transaction_type'], version_number=data[\n 'version_number'], batch_request_id=data['batch_request_id'])\n", (14469, 14663), False, 'from cwr.group import Group, GroupHeader, GroupTrailer\n'), ((15221, 15468), 'cwr.group.GroupTrailer', 'GroupTrailer', ([], {'record_type': "data['record_type']", 'group_id': "data['group_id']", 'transaction_count': "data['transaction_count']", 'record_count': "data['record_count']", 'currency_indicator': 'currency_indicator', 'total_monetary_value': 'total_monetary_value'}), "(record_type=data['record_type'], group_id=data['group_id'],\n transaction_count=data['transaction_count'], record_count=data[\n 'record_count'], currency_indicator=currency_indicator,\n total_monetary_value=total_monetary_value)\n", (15233, 15468), False, 'from cwr.group import Group, GroupHeader, GroupTrailer\n'), 
((16089, 16671), 'cwr.agreement.InterestedPartyForAgreementRecord', 'InterestedPartyForAgreementRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'ip_n': "data['ip_n']", 'ip_last_name': "data['ip_last_name']", 'agreement_role_code': "data['agreement_role_code']", 'ip_writer_first_name': "data['ip_writer_first_name']", 'ipi_name_n': "data['ipi_name_n']", 'ipi_base_n': 'ipi_base', 'pr_society': "data['pr_society']", 'pr_share': "data['pr_share']", 'mr_society': "data['mr_society']", 'mr_share': "data['mr_share']", 'sr_society': "data['sr_society']", 'sr_share': "data['sr_share']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], ip_n=data['ip_n'],\n ip_last_name=data['ip_last_name'], agreement_role_code=data[\n 'agreement_role_code'], ip_writer_first_name=data[\n 'ip_writer_first_name'], ipi_name_n=data['ipi_name_n'], ipi_base_n=\n ipi_base, pr_society=data['pr_society'], pr_share=data['pr_share'],\n mr_society=data['mr_society'], mr_share=data['mr_share'], sr_society=\n data['sr_society'], sr_share=data['sr_share'])\n", (16122, 16671), False, 'from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, InterestedPartyForAgreementRecord\n'), ((16965, 17447), 'cwr.interested_party.IPTerritoryOfControlRecord', 'IPTerritoryOfControlRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'ip_n': "data['ip_n']", 'inclusion_exclusion_indicator': "data['inclusion_exclusion_indicator']", 'tis_numeric_code': "data['tis_numeric_code']", 'sequence_n': "data['sequence_n']", 'pr_collection_share': "data['pr_collection_share']", 'mr_collection_share': "data['mr_collection_share']", 'shares_change': "data['shares_change']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], ip_n=data['ip_n'],\n inclusion_exclusion_indicator=data['inclusion_exclusion_indicator'],\n tis_numeric_code=data['tis_numeric_code'], sequence_n=data['sequence_n'\n ], pr_collection_share=data['pr_collection_share'], mr_collection_share\n =data['mr_collection_share'], shares_change=data['shares_change'])\n", (16991, 17447), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((18443, 18696), 'cwr.work.InstrumentationDetailRecord', 'InstrumentationDetailRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'instrument_code': "data['instrument_code']", 'number_players': "data['number_players']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], instrument_code=data[\n 'instrument_code'], number_players=data['number_players'])\n", (18470, 18696), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((19246, 19601), 'cwr.work.InstrumentationSummaryRecord', 'InstrumentationSummaryRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': 
"data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'number_voices': "data['number_voices']", 'standard_instrumentation_type': "data['standard_instrumentation_type']", 'instrumentation_description': "data['instrumentation_description']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], number_voices=data[\n 'number_voices'], standard_instrumentation_type=data[\n 'standard_instrumentation_type'], instrumentation_description=data[\n 'instrumentation_description'])\n", (19274, 19601), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((19820, 20242), 'cwr.acknowledgement.MessageRecord', 'MessageRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'message_type': "data['message_type']", 'message_text': "data['message_text']", 'original_record_sequence_n': "data['original_record_sequence_n']", 'message_record_type': "data['message_record_type']", 'message_level': "data['message_level']", 'validation_n': "data['validation_n']"}), "(record_type=data['record_type'], transaction_sequence_n=data[\n 'transaction_sequence_n'], record_sequence_n=data['record_sequence_n'],\n message_type=data['message_type'], message_text=data['message_text'],\n original_record_sequence_n=data['original_record_sequence_n'],\n message_record_type=data['message_record_type'], message_level=data[\n 'message_level'], validation_n=data['validation_n'])\n", (19833, 20242), False, 'from cwr.acknowledgement import AcknowledgementRecord, MessageRecord\n'), ((21411, 21812), 'cwr.work.PerformingArtistRecord', 'PerformingArtistRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'performing_artist_last_name': "data['performing_artist_last_name']", 'performing_artist_first_name': 'performing_artist_first_name', 'performing_artist_ipi_name_n': 'performing_artist_ipi_name_n', 'performing_artist_ipi_base_n': 'ipi_base'}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'],\n performing_artist_last_name=data['performing_artist_last_name'],\n performing_artist_first_name=performing_artist_first_name,\n performing_artist_ipi_name_n=performing_artist_ipi_name_n,\n performing_artist_ipi_base_n=ipi_base)\n", (21433, 21812), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((22455, 22857), 'cwr.interested_party.PublisherForWriterRecord', 'PublisherForWriterRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'publisher_ip_n': "data['publisher_ip_n']", 'publisher_name': 'publisher_name', 'writer_ip_n': "data['writer_ip_n']", 'submitter_agreement_n': "data['submitter_agreement_n']", 'society_assigned_agreement_n': "data['society_assigned_agreement_n']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n 
record_sequence_n=data['record_sequence_n'], publisher_ip_n=data[\n 'publisher_ip_n'], publisher_name=publisher_name, writer_ip_n=data[\n 'writer_ip_n'], submitter_agreement_n=data['submitter_agreement_n'],\n society_assigned_agreement_n=data['society_assigned_agreement_n'])\n", (22479, 22857), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((23584, 24174), 'cwr.work.RecordingDetailRecord', 'RecordingDetailRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'first_release_date': "data['first_release_date']", 'first_release_duration': "data['first_release_duration']", 'first_album_title': "data['first_album_title']", 'first_album_label': "data['first_album_label']", 'first_release_catalog_n': "data['first_release_catalog_n']", 'ean': "data['ean']", 'isrc': "data['isrc']", 'recording_format': "data['recording_format']", 'recording_technique': "data['recording_technique']", 'media_type': 'media_type'}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], first_release_date=data[\n 'first_release_date'], first_release_duration=data[\n 'first_release_duration'], first_album_title=data['first_album_title'],\n first_album_label=data['first_album_label'], first_release_catalog_n=\n data['first_release_catalog_n'], ean=data['ean'], isrc=data['isrc'],\n recording_format=data['recording_format'], recording_technique=data[\n 'recording_technique'], media_type=media_type)\n", (23605, 24174), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((25478, 25504), 'cwr.file.CWRFile', 'CWRFile', (['tag', 'transmission'], {}), '(tag, transmission)\n', (25485, 25504), False, 'from cwr.file import CWRFile, FileTag\n'), ((26420, 26457), 'cwr.transmission.Transmission', 'Transmission', (['header', 'trailer', 'groups'], {}), '(header, trailer, groups)\n', (26432, 26457), False, 'from cwr.transmission import Transmission, TransmissionTrailer, TransmissionHeader\n'), ((27626, 27662), 'cwr.group.Group', 'Group', (['header', 'trailer', 'transactions'], {}), '(header, trailer, transactions)\n', (27631, 27662), False, 'from cwr.group import Group, GroupHeader, GroupTrailer\n'), ((27855, 28147), 'cwr.transmission.TransmissionHeader', 'TransmissionHeader', ([], {'record_type': "data['record_type']", 'sender_id': "data['sender_id']", 'sender_name': "data['sender_name']", 'sender_type': "data['sender_type']", 'creation_date_time': "data['creation_date_time']", 'transmission_date': "data['transmission_date']", 'edi_standard': "data['edi_standard']"}), "(record_type=data['record_type'], sender_id=data[\n 'sender_id'], sender_name=data['sender_name'], sender_type=data[\n 'sender_type'], creation_date_time=data['creation_date_time'],\n transmission_date=data['transmission_date'], edi_standard=data[\n 'edi_standard'])\n", (27873, 28147), False, 'from cwr.transmission import Transmission, TransmissionTrailer, TransmissionHeader\n'), ((28694, 28868), 'cwr.transmission.TransmissionTrailer', 'TransmissionTrailer', ([], {'record_type': "data['record_type']", 'group_count': "data['group_count']", 'transaction_count': "data['transaction_count']", 'record_count': 
"data['record_count']"}), "(record_type=data['record_type'], group_count=data[\n 'group_count'], transaction_count=data['transaction_count'],\n record_count=data['record_count'])\n", (28713, 28868), False, 'from cwr.transmission import Transmission, TransmissionTrailer, TransmissionHeader\n'), ((29616, 30863), 'cwr.work.WorkRecord', 'WorkRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'submitter_work_n': "data['submitter_work_n']", 'title': "data['title']", 'version_type': "data['version_type']", 'musical_work_distribution_category': "data['musical_work_distribution_category']", 'date_publication_printed_edition': "data['date_publication_printed_edition']", 'text_music_relationship': "data['text_music_relationship']", 'language_code': "data['language_code']", 'copyright_number': "data['copyright_number']", 'copyright_date': "data['copyright_date']", 'music_arrangement': "data['music_arrangement']", 'lyric_adaptation': "data['lyric_adaptation']", 'excerpt_type': "data['excerpt_type']", 'composite_type': "data['composite_type']", 'composite_component_count': "data['composite_component_count']", 'iswc': "data['iswc']", 'work_type': "data['work_type']", 'duration': "data['duration']", 'catalogue_number': 'catalogue_number', 'opus_number': 'opus_number', 'contact_id': "data['contact_id']", 'contact_name': "data['contact_name']", 'recorded_indicator': "data['recorded_indicator']", 'priority_flag': 'priority_flag', 'exceptional_clause': 'exceptional_clause', 'grand_rights_indicator': "data['grand_rights_indicator']"}), "(record_type=data['record_type'], transaction_sequence_n=data[\n 'transaction_sequence_n'], record_sequence_n=data['record_sequence_n'],\n submitter_work_n=data['submitter_work_n'], title=data['title'],\n version_type=data['version_type'], musical_work_distribution_category=\n data['musical_work_distribution_category'],\n date_publication_printed_edition=data[\n 'date_publication_printed_edition'], text_music_relationship=data[\n 'text_music_relationship'], language_code=data['language_code'],\n copyright_number=data['copyright_number'], copyright_date=data[\n 'copyright_date'], music_arrangement=data['music_arrangement'],\n lyric_adaptation=data['lyric_adaptation'], excerpt_type=data[\n 'excerpt_type'], composite_type=data['composite_type'],\n composite_component_count=data['composite_component_count'], iswc=data[\n 'iswc'], work_type=data['work_type'], duration=data['duration'],\n catalogue_number=catalogue_number, opus_number=opus_number, contact_id=\n data['contact_id'], contact_name=data['contact_name'],\n recorded_indicator=data['recorded_indicator'], priority_flag=\n priority_flag, exceptional_clause=exceptional_clause,\n grand_rights_indicator=data['grand_rights_indicator'])\n", (29626, 30863), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((31783, 32377), 'cwr.work.WorkOriginRecord', 'WorkOriginRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'intended_purpose': "data['intended_purpose']", 'production_title': "data['production_title']", 'cd_identifier': "data['cd_identifier']", 'cut_number': "data['cut_number']", 'library': "data['library']", 'bltvr': "data['bltvr']", 
'visan': "data['visan']", 'production_n': "data['production_n']", 'episode_title': "data['episode_title']", 'episode_n': "data['episode_n']", 'year_production': "data['year_production']", 'audio_visual_key': "data['audio_visual_key']"}), "(record_type=data['record_type'], transaction_sequence_n=\n data['transaction_sequence_n'], record_sequence_n=data[\n 'record_sequence_n'], intended_purpose=data['intended_purpose'],\n production_title=data['production_title'], cd_identifier=data[\n 'cd_identifier'], cut_number=data['cut_number'], library=data['library'\n ], bltvr=data['bltvr'], visan=data['visan'], production_n=data[\n 'production_n'], episode_title=data['episode_title'], episode_n=data[\n 'episode_n'], year_production=data['year_production'], audio_visual_key\n =data['audio_visual_key'])\n", (31799, 32377), False, 'from cwr.work import RecordingDetailRecord, ComponentRecord, AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, WorkRecord\n'), ((33247, 33490), 'cwr.interested_party.Writer', 'Writer', ([], {'ip_n': "data['ip_n']", 'personal_number': "data['personal_number']", 'ipi_base_n': 'ipi_base_n', 'writer_first_name': "data['writer_first_name']", 'writer_last_name': "data['writer_last_name']", 'tax_id': "data['tax_id']", 'ipi_name_n': "data['ipi_name_n']"}), "(ip_n=data['ip_n'], personal_number=data['personal_number'],\n ipi_base_n=ipi_base_n, writer_first_name=data['writer_first_name'],\n writer_last_name=data['writer_last_name'], tax_id=data['tax_id'],\n ipi_name_n=data['ipi_name_n'])\n", (33253, 33490), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((34016, 34691), 'cwr.interested_party.WriterRecord', 'WriterRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'writer': 'writer', 'writer_designation': "data['writer_designation']", 'work_for_hire': "data['work_for_hire']", 'writer_unknown': "data['writer_unknown']", 'reversionary': "data['reversionary']", 'first_recording_refusal': "data['first_recording_refusal']", 'usa_license': 'usa_license', 'pr_society': "data['pr_society']", 'pr_ownership_share': "data['pr_ownership_share']", 'mr_society': "data['mr_society']", 'mr_ownership_share': "data['mr_ownership_share']", 'sr_society': "data['sr_society']", 'sr_ownership_share': "data['sr_ownership_share']"}), "(record_type=data['record_type'], transaction_sequence_n=data[\n 'transaction_sequence_n'], record_sequence_n=data['record_sequence_n'],\n writer=writer, writer_designation=data['writer_designation'],\n work_for_hire=data['work_for_hire'], writer_unknown=data[\n 'writer_unknown'], reversionary=data['reversionary'],\n first_recording_refusal=data['first_recording_refusal'], usa_license=\n usa_license, pr_society=data['pr_society'], pr_ownership_share=data[\n 'pr_ownership_share'], mr_society=data['mr_society'],\n mr_ownership_share=data['mr_ownership_share'], sr_society=data[\n 'sr_society'], sr_ownership_share=data['sr_ownership_share'])\n", (34028, 34691), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((35351, 35657), 'cwr.non_roman_alphabet.NonRomanAlphabetAgreementPartyRecord', 'NonRomanAlphabetAgreementPartyRecord', ([], {'record_type': "data['record_type']", 
'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'ip_name': "data['ip_name']", 'ip_writer_name': "data['ip_writer_name']", 'ip_n': "data['ip_n']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], ip_name=data['ip_name'],\n ip_writer_name=data['ip_writer_name'], ip_n=data['ip_n'], language_code\n =data['language_code'])\n", (35387, 35657), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((35934, 36260), 'cwr.non_roman_alphabet.NonRomanAlphabetOtherWriterRecord', 'NonRomanAlphabetOtherWriterRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'writer_first_name': "data['writer_first_name']", 'writer_name': "data['writer_name']", 'position': "data['position']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], writer_first_name=data[\n 'writer_first_name'], writer_name=data['writer_name'], position=data[\n 'position'], language_code=data['language_code'])\n", (35967, 36260), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((36830, 37398), 'cwr.non_roman_alphabet.NonRomanAlphabetPerformanceDataRecord', 'NonRomanAlphabetPerformanceDataRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'performing_artist_first_name': "data['performing_artist_first_name']", 'performing_artist_name': "data['performing_artist_name']", 'performing_artist_ipi_name_n': "data['performing_artist_ipi_name_n']", 'performing_artist_ipi_base_n': 'ipi_base', 'language_code': "data['language_code']", 'performance_language': "data['performance_language']", 'performance_dialect': "data['performance_dialect']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'],\n performing_artist_first_name=data['performing_artist_first_name'],\n performing_artist_name=data['performing_artist_name'],\n performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],\n performing_artist_ipi_base_n=ipi_base, language_code=data[\n 'language_code'], performance_language=data['performance_language'],\n performance_dialect=data['performance_dialect'])\n", (36867, 37398), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((37699, 38031), 'cwr.non_roman_alphabet.NonRomanAlphabetPublisherNameRecord', 'NonRomanAlphabetPublisherNameRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': 
"data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'publisher_sequence_n': "data['publisher_sequence_n']", 'ip_n': "data['ip_n']", 'publisher_name': "data['publisher_name']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], publisher_sequence_n=data[\n 'publisher_sequence_n'], ip_n=data['ip_n'], publisher_name=data[\n 'publisher_name'], language_code=data['language_code'])\n", (37734, 38031), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((38295, 38556), 'cwr.non_roman_alphabet.NonRomanAlphabetTitleRecord', 'NonRomanAlphabetTitleRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'title': "data['title']", 'title_type': "data['title_type']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], title=data['title'],\n title_type=data['title_type'], language_code=data['language_code'])\n", (38322, 38556), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((39050, 39279), 'cwr.non_roman_alphabet.NonRomanAlphabetWorkRecord', 'NonRomanAlphabetWorkRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'title': "data['title']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], title=data['title'],\n language_code=data['language_code'])\n", (39076, 39279), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((39736, 40063), 'cwr.non_roman_alphabet.NonRomanAlphabetWriterNameRecord', 'NonRomanAlphabetWriterNameRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'writer_first_name': "data['writer_first_name']", 'writer_last_name': "data['writer_last_name']", 'ip_n': "data['ip_n']", 'language_code': "data['language_code']"}), "(record_type=data['record_type'],\n transaction_sequence_n=data['transaction_sequence_n'],\n record_sequence_n=data['record_sequence_n'], writer_first_name=data[\n 'writer_first_name'], writer_last_name=data['writer_last_name'], ip_n=\n data['ip_n'], language_code=data['language_code'])\n", (39768, 40063), False, 'from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, 
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord\n'), ((41105, 41251), 'cwr.interested_party.Publisher', 'Publisher', ([], {'ip_n': "data['ip_n']", 'publisher_name': "data['publisher_name']", 'ipi_name_n': "data['ipi_name_n']", 'ipi_base_n': 'ipi_base', 'tax_id': "data['tax_id']"}), "(ip_n=data['ip_n'], publisher_name=data['publisher_name'],\n ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base, tax_id=data['tax_id'])\n", (41114, 41251), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((42534, 43444), 'cwr.interested_party.PublisherRecord', 'PublisherRecord', ([], {'record_type': "data['record_type']", 'transaction_sequence_n': "data['transaction_sequence_n']", 'record_sequence_n': "data['record_sequence_n']", 'publisher': 'publisher', 'publisher_sequence_n': "data['publisher_sequence_n']", 'submitter_agreement_n': "data['submitter_agreement_n']", 'publisher_type': "data['publisher_type']", 'publisher_unknown': "data['publisher_unknown']", 'pr_society': "data['pr_society']", 'pr_ownership_share': "data['pr_ownership_share']", 'mr_society': "data['mr_society']", 'mr_ownership_share': "data['mr_ownership_share']", 'sr_society': "data['sr_society']", 'sr_ownership_share': "data['sr_ownership_share']", 'special_agreements': 'special_agreements', 'first_recording_refusal': 'first_recording_refusal', 'international_standard_code': 'international_standard_code', 'society_assigned_agreement_n': 'society_assigned_agreement_n', 'agreement_type': 'agreement_type', 'usa_license': 'usa_license'}), "(record_type=data['record_type'], transaction_sequence_n=\n data['transaction_sequence_n'], record_sequence_n=data[\n 'record_sequence_n'], publisher=publisher, publisher_sequence_n=data[\n 'publisher_sequence_n'], submitter_agreement_n=data[\n 'submitter_agreement_n'], publisher_type=data['publisher_type'],\n publisher_unknown=data['publisher_unknown'], pr_society=data[\n 'pr_society'], pr_ownership_share=data['pr_ownership_share'],\n mr_society=data['mr_society'], mr_ownership_share=data[\n 'mr_ownership_share'], sr_society=data['sr_society'],\n sr_ownership_share=data['sr_ownership_share'], special_agreements=\n special_agreements, first_recording_refusal=first_recording_refusal,\n international_standard_code=international_standard_code,\n society_assigned_agreement_n=society_assigned_agreement_n,\n agreement_type=agreement_type, usa_license=usa_license)\n", (42549, 43444), False, 'from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord\n'), ((43801, 43887), 'cwr.table_value.TableValue', 'TableValue', ([], {'code': "data['code']", 'name': "data['name']", 'description': "data['description']"}), "(code=data['code'], name=data['name'], description=data[\n 'description'])\n", (43811, 43887), False, 'from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue\n'), ((44117, 44311), 'cwr.table_value.MediaTypeValue', 'MediaTypeValue', ([], {'code': "data['code']", 'name': "data['name']", 'media_type': "data['media_type']", 'duration_max': "data['duration_max']", 'works_max': "data['works_max']", 'fragments_max': "data['fragments_max']"}), "(code=data['code'], name=data['name'], media_type=data[\n 'media_type'], duration_max=data['duration_max'], works_max=data[\n 'works_max'], fragments_max=data['fragments_max'])\n", (44131, 44311), False, 'from cwr.table_value import MediaTypeValue, TableValue, 
InstrumentValue\n'), ((44636, 44749), 'cwr.table_value.InstrumentValue', 'InstrumentValue', ([], {'code': "data['code']", 'name': "data['name']", 'family': "data['family']", 'description': "data['description']"}), "(code=data['code'], name=data['name'], family=data['family'],\n description=data['description'])\n", (44651, 44749), False, 'from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue\n'), ((45007, 45103), 'cwr.file.FileTag', 'FileTag', (["data['year']", "data['sequence_n']", "data['sender']", "data['receiver']", "data['version']"], {}), "(data['year'], data['sequence_n'], data['sender'], data['receiver'],\n data['version'])\n", (45014, 45103), False, 'from cwr.file import CWRFile, FileTag\n'), ((45358, 45405), 'cwr.other.AVIKey', 'AVIKey', (["data['society_code']", "data['av_number']"], {}), "(data['society_code'], data['av_number'])\n", (45364, 45405), False, 'from cwr.other import AVIKey, VISAN\n')] |
import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
class SimpleUserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
)
class UserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
"signed_up_from",
"date_joined",
"username",
"email",
"created",
"modified",
)
class UserTokenSerializer(TokenSerializer):
user = UserReadSerializer()
class Meta:
model = Token
fields = ["key", "user"]
# TODO - this view and serializer are on hold as you figure out registration (later)
class UserCreateSerializer(ModelSerializer):
username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
# need to make email optional ... prob should think through signup form a little
email = CharField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
password = CharField(write_only=True, min_length=8)
signed_up_from = CharField(
write_only=True, min_length=8, required=False, default="", trim_whitespace=True
)
timezone_string = ChoiceField(
choices=pytz.all_timezones, required=False, default="US/Eastern"
)
class Meta:
model = User
fields = ["username", "email", "password", "signed_up_from", "timezone_string"]
# TODO test - does this work with just username / no email, etc.
def create(self, validated_data):
username = validated_data.pop("username")
password = validated_data.pop("password")
is_betterself_user = False
if validated_data["signed_up_from"] == "betterself":
is_betterself_user = True
validated_data["is_betterself_user"] = is_betterself_user
user = User.objects.create(username=username, **validated_data)
user.set_password(password)
user.save()
return user
class UserDeleteSerializer(Serializer):
    # most of this is actually redundant, I don't need to have a validation step, but I do this
    # out of paranoia, in case someone deletes their account by mistake
password = CharField()
user = HiddenField(default=CurrentUserDefault())
uuid = UUIDField()
def validate(self, data):
user = data["user"]
validated_password = check_password(data["password"], user.password)
if not validated_password:
raise ValidationError("Invalid Password Entered")
validated_uuid = str(user.uuid) == str(data["uuid"])
if not validated_uuid:
raise ValidationError("Invalid UUID", str(user.uuid))
validate_user = user.username != "<EMAIL>"
if not validate_user:
raise ValidationError(
f"This is a protected user and cannot be deleted. {user.username}"
)
return data
| [
"rest_framework.fields.ChoiceField",
"django.contrib.auth.hashers.check_password",
"rest_framework.fields.CurrentUserDefault",
"rest_framework.fields.UUIDField",
"rest_framework.exceptions.ValidationError",
"rest_framework.fields.CharField",
"open.users.models.User.objects.create",
"open.users.models.User.objects.all"
]
| [((1538, 1578), 'rest_framework.fields.CharField', 'CharField', ([], {'write_only': '(True)', 'min_length': '(8)'}), '(write_only=True, min_length=8)\n', (1547, 1578), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((1600, 1694), 'rest_framework.fields.CharField', 'CharField', ([], {'write_only': '(True)', 'min_length': '(8)', 'required': '(False)', 'default': '""""""', 'trim_whitespace': '(True)'}), "(write_only=True, min_length=8, required=False, default='',\n trim_whitespace=True)\n", (1609, 1694), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((1727, 1804), 'rest_framework.fields.ChoiceField', 'ChoiceField', ([], {'choices': 'pytz.all_timezones', 'required': '(False)', 'default': '"""US/Eastern"""'}), "(choices=pytz.all_timezones, required=False, default='US/Eastern')\n", (1738, 1804), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((2738, 2749), 'rest_framework.fields.CharField', 'CharField', ([], {}), '()\n', (2747, 2749), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((2814, 2825), 'rest_framework.fields.UUIDField', 'UUIDField', ([], {}), '()\n', (2823, 2825), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((2372, 2428), 'open.users.models.User.objects.create', 'User.objects.create', ([], {'username': 'username'}), '(username=username, **validated_data)\n', (2391, 2428), False, 'from open.users.models import User\n'), ((2914, 2961), 'django.contrib.auth.hashers.check_password', 'check_password', (["data['password']", 'user.password'], {}), "(data['password'], user.password)\n", (2928, 2961), False, 'from django.contrib.auth.hashers import check_password\n'), ((2781, 2801), 'rest_framework.fields.CurrentUserDefault', 'CurrentUserDefault', ([], {}), '()\n', (2799, 2801), False, 'from rest_framework.fields import CharField, CurrentUserDefault, HiddenField, UUIDField, ChoiceField\n'), ((3016, 3059), 'rest_framework.exceptions.ValidationError', 'ValidationError', (['"""Invalid Password Entered"""'], {}), "('Invalid Password Entered')\n", (3031, 3059), False, 'from rest_framework.exceptions import ValidationError\n'), ((3319, 3407), 'rest_framework.exceptions.ValidationError', 'ValidationError', (['f"""This is a protected user and cannot be deleted. {user.username}"""'], {}), "(\n f'This is a protected user and cannot be deleted. {user.username}')\n", (3334, 3407), False, 'from rest_framework.exceptions import ValidationError\n'), ((1305, 1323), 'open.users.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1321, 1323), False, 'from open.users.models import User\n'), ((1480, 1498), 'open.users.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1496, 1498), False, 'from open.users.models import User\n')] |
"""Automated speech recognition tests."""
import os
import sys
import unittest
from pathlib import Path
import requests
from rhasspyhermes.asr import AsrTextCaptured
from rhasspyhermes.nlu import NluIntent
class AsrEnglishTests(unittest.TestCase):
"""Test automated speech recognition (English)"""
def setUp(self):
self.http_host = os.environ.get("RHASSPY_HTTP_HOST", "localhost")
self.http_port = os.environ.get("RHASSPY_HTTP_PORT", 12101)
self.wav_bytes = Path("wav/en/turn_on_the_living_room_lamp.wav").read_bytes()
def api_url(self, fragment):
return f"http://{self.http_host}:{self.http_port}/api/{fragment}"
def check_status(self, response):
if response.status_code != 200:
print(response.text, file=sys.stderr)
response.raise_for_status()
def test_http_speech_to_text(self):
"""Test speech-to-text HTTP endpoint"""
response = requests.post(self.api_url("speech-to-text"), data=self.wav_bytes)
self.check_status(response)
text = response.content.decode()
self.assertEqual(text, "turn on the living room lamp")
def test_http_speech_to_text_json(self):
"""Text speech-to-text HTTP endpoint (Rhasspy JSON format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
headers={"Accept": "application/json"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["text"], "turn on the living room lamp")
def test_http_speech_to_text_hermes(self):
"""Text speech-to-text HTTP endpoint (Hermes format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "textCaptured")
text_captured = AsrTextCaptured.from_dict(result["value"])
self.assertEqual(text_captured.text, "turn on the living room lamp")
def test_http_speech_to_intent(self):
response = requests.post(self.api_url("speech-to-intent"), data=self.wav_bytes)
self.check_status(response)
result = response.json()
self.assertEqual(result["intent"]["name"], "ChangeLightState")
self.assertEqual(result["text"], "turn on the living room lamp")
self.assertEqual(result["slots"]["name"], "living room lamp")
self.assertEqual(result["slots"]["state"], "on")
def test_http_speech_to_intent_hermes(self):
response = requests.post(
self.api_url("speech-to-intent"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "intent")
nlu_intent = NluIntent.from_dict(result["value"])
self.assertEqual(nlu_intent.raw_input, "turn on the living room lamp")
self.assertEqual(nlu_intent.input, "turn on the living room lamp")
# Intent name and slots
self.assertEqual(nlu_intent.intent.intent_name, "ChangeLightState")
slots_by_name = {slot.slot_name: slot for slot in nlu_intent.slots}
self.assertIn("name", slots_by_name)
self.assertEqual(slots_by_name["name"].value["value"], "living room lamp")
self.assertIn("state", slots_by_name)
self.assertEqual(slots_by_name["state"].value["value"], "on")
| [
"pathlib.Path",
"os.environ.get",
"rhasspyhermes.nlu.NluIntent.from_dict",
"rhasspyhermes.asr.AsrTextCaptured.from_dict"
]
| [((353, 401), 'os.environ.get', 'os.environ.get', (['"""RHASSPY_HTTP_HOST"""', '"""localhost"""'], {}), "('RHASSPY_HTTP_HOST', 'localhost')\n", (367, 401), False, 'import os\n'), ((427, 469), 'os.environ.get', 'os.environ.get', (['"""RHASSPY_HTTP_PORT"""', '(12101)'], {}), "('RHASSPY_HTTP_PORT', 12101)\n", (441, 469), False, 'import os\n'), ((2010, 2052), 'rhasspyhermes.asr.AsrTextCaptured.from_dict', 'AsrTextCaptured.from_dict', (["result['value']"], {}), "(result['value'])\n", (2035, 2052), False, 'from rhasspyhermes.asr import AsrTextCaptured\n'), ((2966, 3002), 'rhasspyhermes.nlu.NluIntent.from_dict', 'NluIntent.from_dict', (["result['value']"], {}), "(result['value'])\n", (2985, 3002), False, 'from rhasspyhermes.nlu import NluIntent\n'), ((495, 542), 'pathlib.Path', 'Path', (['"""wav/en/turn_on_the_living_room_lamp.wav"""'], {}), "('wav/en/turn_on_the_living_room_lamp.wav')\n", (499, 542), False, 'from pathlib import Path\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
| [
"torch.nn.AvgPool1d"
]
| [((455, 503), 'torch.nn.AvgPool1d', 'nn.AvgPool1d', ([], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(2)'}), '(kernel_size=4, stride=2, padding=2)\n', (467, 503), True, 'import torch.nn as nn\n')] |
from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here.
def view_schedule(request):
all_posts = Post.objects.all().order_by('pub_date')
page = int(request.GET.get('p', 1))
pagenator = Paginator(all_posts, 5)
posts = pagenator.get_page(page)
return render(request, 'schedule/view_schedule.html', {'posts': posts})
def write_schedule(request):
if request.method == "POST":
form = ScheduleForm(request.POST)
if form.is_valid():
            # Call all of the form's validators to perform validation
# user_id = request.session.get('user')
# user = User.objects.get(pk=user_id)
schedule = Post()
schedule.title = form.cleaned_data['title']
            # # Values that pass validation are provided as a dict (form.cleaned_data)
            # # On validation failure, error information is stored in form.error
schedule.username = form.cleaned_data['username']
schedule.pub_date = form.cleaned_data['pub_date']
schedule.save()
return redirect('schedule:view_schedule')
else:
form = ScheduleForm()
return render(request, 'schedule/write_schedule.html', {'form': form})
def delete(request, posts_id):
post = Post.objects.get(id=posts_id)
post.delete()
posts = Post.objects.all().order_by('-id')
return render(request, 'schedule/view_schedule.html', {'posts': posts}) | [
"django.shortcuts.render",
"django.shortcuts.redirect",
"django.core.paginator.Paginator"
]
| [((314, 337), 'django.core.paginator.Paginator', 'Paginator', (['all_posts', '(5)'], {}), '(all_posts, 5)\n', (323, 337), False, 'from django.core.paginator import Paginator\n'), ((386, 450), 'django.shortcuts.render', 'render', (['request', '"""schedule/view_schedule.html"""', "{'posts': posts}"], {}), "(request, 'schedule/view_schedule.html', {'posts': posts})\n", (392, 450), False, 'from django.shortcuts import render, redirect\n'), ((1186, 1249), 'django.shortcuts.render', 'render', (['request', '"""schedule/write_schedule.html"""', "{'form': form}"], {}), "(request, 'schedule/write_schedule.html', {'form': form})\n", (1192, 1249), False, 'from django.shortcuts import render, redirect\n'), ((1399, 1463), 'django.shortcuts.render', 'render', (['request', '"""schedule/view_schedule.html"""', "{'posts': posts}"], {}), "(request, 'schedule/view_schedule.html', {'posts': posts})\n", (1405, 1463), False, 'from django.shortcuts import render, redirect\n'), ((1098, 1132), 'django.shortcuts.redirect', 'redirect', (['"""schedule:view_schedule"""'], {}), "('schedule:view_schedule')\n", (1106, 1132), False, 'from django.shortcuts import render, redirect\n')] |
import pandas as pd
from pandas.api.types import is_numeric_dtype
from grimer.utils import print_log
class Metadata:
valid_types = ["categorical", "numeric"]
default_type = "categorical"
def __init__(self, metadata_file, samples: list=[]):
# Read metadata and let pandas guess dtypes, index as str
self.data = pd.read_table(metadata_file, sep='\t', header=0, skiprows=0, index_col=0, dtype={0:str})
# Enforce string index
self.data.index = self.data.index.astype('str')
# Define all COLUMN TYPES as default
self.types = pd.Series(self.default_type, index=self.data.columns)
# Set types
if str(self.data.index[0]).startswith("#"):
# types defined on file
self.set_hard_types()
else:
# guessed types from read_table
self.types[self.data.dtypes.map(is_numeric_dtype)] = "numeric"
# Convert datatypes to adequate numeric values (int, float)
self.data = self.data.convert_dtypes(infer_objects=False, convert_string=False)
        # Re-convert everything to object to standardize (int64 NA is not serializable in bokeh)
self.data = self.data.astype("object")
# Remove empty fields
null_cols = self.data.isna().all(axis=0)
if any(null_cols):
self.data = self.data.loc[:, ~null_cols]
self.types = self.types[~null_cols]
print_log(str(sum(null_cols)) + " fields removed without valid values")
        # Convert NaN in categorical columns to ""
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].fillna('')
# Remove names
self.data.index.names = [None]
self.types.name = None
# sort and filter by given samples
if samples:
self.data = self.data.reindex(samples)
            # Check whether the metadata matched the given samples
null_rows = self.data.isna().all(axis=1)
if any(null_rows):
#self.data = self.data.loc[~null_rows, :]
print_log(str(sum(null_rows)) + " samples without valid metadata")
def __repr__(self):
args = ['{}={}'.format(k, repr(v)) for (k, v) in vars(self).items()]
return 'Metadata({})'.format(', '.join(args))
def set_hard_types(self):
# Get values defined on the first row
self.types = self.data.iloc[0]
# Drop row with types from main data
self.data.drop(self.types.name, inplace=True)
# Validate declared types
idx_valid = self.types.isin(self.valid_types)
if not idx_valid.all():
print_log("Invalid metadata types replaced by: " + self.default_type)
self.types[~idx_valid] = self.default_type
# Enforce column type on dataframe
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].astype(str)
self.data[self.types[self.types == "numeric"].index] = self.data[self.types[self.types == "numeric"].index].apply(pd.to_numeric)
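    # Illustrative sketch (an assumption about the expected input, not taken from this file):
    # a metadata table with hard-typed columns could look like the layout below, where the
    # extra row whose index starts with "#" declares each column's type and is dropped from
    # the data by set_hard_types():
    #
    #   sample    age      group
    #   #types    numeric  categorical
    #   S1        34       control
    #   S2        51       case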
def get_col_headers(self):
return self.data.columns
def get_data(self, metadata_type: str=None):
if metadata_type is not None:
return self.data[self.types[self.types == metadata_type].index]
else:
return self.data
def get_col(self, col):
return self.data[col]
def get_unique_values(self, col):
return sorted(self.get_col(col).dropna().unique())
def get_formatted_unique_values(self, col):
if self.types[col] == "categorical":
return self.get_unique_values(col)
else:
return list(map('{:.16g}'.format, self.get_unique_values(col)))
def get_type(self, col):
return self.types[col]
def get_subset(self, column, value):
return self.data[self.data[column] == value]
| [
"pandas.Series",
"grimer.utils.print_log",
"pandas.read_table"
]
| [((341, 436), 'pandas.read_table', 'pd.read_table', (['metadata_file'], {'sep': '"""\t"""', 'header': '(0)', 'skiprows': '(0)', 'index_col': '(0)', 'dtype': '{(0): str}'}), "(metadata_file, sep='\\t', header=0, skiprows=0, index_col=0,\n dtype={(0): str})\n", (354, 436), True, 'import pandas as pd\n'), ((585, 638), 'pandas.Series', 'pd.Series', (['self.default_type'], {'index': 'self.data.columns'}), '(self.default_type, index=self.data.columns)\n', (594, 638), True, 'import pandas as pd\n'), ((2654, 2723), 'grimer.utils.print_log', 'print_log', (["('Invalid metadata types replaced by: ' + self.default_type)"], {}), "('Invalid metadata types replaced by: ' + self.default_type)\n", (2663, 2723), False, 'from grimer.utils import print_log\n')] |
from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
    than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
| [
"allennlp.common.checks.ConfigurationError"
]
| [((4970, 5122), 'allennlp.common.checks.ConfigurationError', 'ConfigurationError', (['f"""You configured the trainer to use the {e.args[0]} metric for early stopping, but the model did not produce that metric."""'], {}), "(\n f'You configured the trainer to use the {e.args[0]} metric for early stopping, but the model did not produce that metric.'\n )\n", (4988, 5122), False, 'from allennlp.common.checks import ConfigurationError\n'), ((1979, 2035), 'allennlp.common.checks.ConfigurationError', 'ConfigurationError', (['"""metric_name must start with + or -"""'], {}), "('metric_name must start with + or -')\n", (1997, 2035), False, 'from allennlp.common.checks import ConfigurationError\n')] |
from authors.apps.utils.renderers import AppJSONRenderer
import json
from rest_framework.renderers import JSONRenderer
class UserProfileJSONRenderer(AppJSONRenderer):
name = 'profile'
class UserProfileListRenderer(JSONRenderer):
"""
Returns profiles of existing users
"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
""" present a list of
user profiles in json format
"""
return json.dumps({
'profiles':data
})
class ReadStatsJsonRenderer(AppJSONRenderer):
name = 'read_stats'
| [
"json.dumps"
]
| [((482, 512), 'json.dumps', 'json.dumps', (["{'profiles': data}"], {}), "({'profiles': data})\n", (492, 512), False, 'import json\n')] |
### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py)
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants as mllog_const
mllogger = mllog.get_mllogger()
mllog.config(
filename=(os.getenv("COMPLIANCE_FILE") or "mlperf_compliance.log"),
root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
def ssd_print(*args, sync=True, **kwargs):
use_cuda = os.getenv('USE_CUDA')
if sync and use_cuda=='True':
barrier()
if get_rank() == 0:
kwargs['stack_offset'] = 2
mllogger.event(*args, **kwargs)
def barrier():
"""
    Works as a temporary distributed barrier; currently PyTorch
    doesn't implement barrier for the NCCL backend.
Calls all_reduce on dummy tensor and synchronizes with GPU.
"""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = os.getenv('RANK', os.getenv('LOCAL_RANK', 0))
return rank
def broadcast_seeds(seed, device):
if torch.distributed.is_initialized():
seeds_tensor = torch.LongTensor([seed]).to(device)
torch.distributed.broadcast(seeds_tensor, 0)
seed = seeds_tensor.item()
return seed
| [
"torch.cuda.FloatTensor",
"os.getenv",
"torch.LongTensor",
"torch.distributed.is_initialized",
"torch.cuda.synchronize",
"os.path.realpath",
"torch.distributed.broadcast",
"mlperf_logging.mllog.get_mllogger",
"torch.distributed.get_rank"
]
| [((933, 953), 'mlperf_logging.mllog.get_mllogger', 'mllog.get_mllogger', ([], {}), '()\n', (951, 953), False, 'from mlperf_logging import mllog\n'), ((1175, 1196), 'os.getenv', 'os.getenv', (['"""USE_CUDA"""'], {}), "('USE_CUDA')\n", (1184, 1196), False, 'import os\n'), ((1564, 1598), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1596, 1598), False, 'import torch\n'), ((1815, 1849), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1847, 1849), False, 'import torch\n'), ((2025, 2059), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (2057, 2059), False, 'import torch\n'), ((1672, 1696), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1694, 1696), False, 'import torch\n'), ((1866, 1894), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1892, 1894), False, 'import torch\n'), ((2128, 2172), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['seeds_tensor', '(0)'], {}), '(seeds_tensor, 0)\n', (2155, 2172), False, 'import torch\n'), ((982, 1010), 'os.getenv', 'os.getenv', (['"""COMPLIANCE_FILE"""'], {}), "('COMPLIANCE_FILE')\n", (991, 1010), False, 'import os\n'), ((1637, 1662), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (1659, 1662), False, 'import torch\n'), ((1938, 1964), 'os.getenv', 'os.getenv', (['"""LOCAL_RANK"""', '(0)'], {}), "('LOCAL_RANK', 0)\n", (1947, 1964), False, 'import os\n'), ((1086, 1112), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1102, 1112), False, 'import os\n'), ((2084, 2108), 'torch.LongTensor', 'torch.LongTensor', (['[seed]'], {}), '([seed])\n', (2100, 2108), False, 'import torch\n')] |
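The helpers in the record above (barrier, get_rank, broadcast_seeds, ssd_print) are meant to be called once from a training script. The sketch below is an illustrative assumption of how that wiring usually looks — the import path mlperf_logger is inferred from the linked filename, and the per-rank seeding line is not part of the record.
# Illustrative usage sketch only. Assumes the file above is importable as
# mlperf_logger and that torch.distributed has already been initialized.
import torch
from mlperf_logger import ssd_print, barrier, get_rank, broadcast_seeds
from mlperf_logging.mllog import constants as mllog_const


def seed_all_ranks(base_seed: int) -> int:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    seed = broadcast_seeds(base_seed, device)        # rank 0's seed wins everywhere
    torch.manual_seed(seed + int(get_rank()))         # decorrelate per-rank RNG streams
    barrier()                                           # keep ranks in lock-step
    ssd_print(key=mllog_const.SEED, value=seed)         # logged on rank 0 only
    return seed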
import math
import pymel.core as pymel
from omtk.core.classNode import Node
from omtk.libs import libAttr
from omtk.libs import libRigging
from . import model_avar_surface
class SplitterNode(Node):
"""
    A splitter is a node network that takes the parameterV that is normally sent through the follicles and
    splits it between two destinations: the follicles and the jaw ref constraint.
    The more the jaw is opened, the more we'll transfer to the jaw ref before sending to the follicle.
    This is mainly used to ensure that any lip movement created by the jaw is canceled when the
    animator tries to correct the lips while the jaw is open; otherwise the correction made in surface
    space would stack on top of the movement already created in jaw space.
    To compute the displacement caused by the jaw, we'll use the circumference around the jaw pivot.
    This creates an 'approximation' that might be wrong if some translation also occurs in the jaw.
todo: test with corrective jaw translation
"""
def __init__(self):
super(SplitterNode, self).__init__() # useless
self.attr_inn_jaw_pt = None
self.attr_inn_jaw_radius = None
self.attr_inn_surface_v = None
self.attr_inn_surface_range_v = None
self.attr_inn_jaw_default_ratio = None
self.attr_out_surface_v = None
self.attr_out_jaw_ratio = None
def build(self, nomenclature_rig, **kwargs):
super(SplitterNode, self).build(**kwargs)
#
# Create inn and out attributes.
#
grp_splitter_inn = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterInn')
)
# The jaw opening amount in degree.
self.attr_inn_jaw_pt = libAttr.addAttr(grp_splitter_inn, 'innJawOpen')
# The relative uv coordinates normally sent to the follicles.
# Note that this value is expected to change at the output of the SplitterNode (see outSurfaceU and outSurfaceV)
self.attr_inn_surface_u = libAttr.addAttr(grp_splitter_inn, 'innSurfaceU')
self.attr_inn_surface_v = libAttr.addAttr(grp_splitter_inn, 'innSurfaceV')
# Use this switch to disable completely the splitter.
self.attr_inn_bypass = libAttr.addAttr(grp_splitter_inn, 'innBypassAmount')
# The arc length in world space of the surface controlling the follicles.
self.attr_inn_surface_range_v = libAttr.addAttr(grp_splitter_inn,
                                                        'innSurfaceRangeV')  # How many degrees does the jaw take to create 1 unit of surface deformation? (ex: 20)
        # How much, in percent, the lips follow the jaw by default.
# Note that this value is expected to change at the output of the SplitterNode (see attr_out_jaw_ratio)
self.attr_inn_jaw_default_ratio = libAttr.addAttr(grp_splitter_inn, 'jawDefaultRatio')
# The radius of the influence circle normally resolved by using the distance between the jaw and the avar as radius.
self.attr_inn_jaw_radius = libAttr.addAttr(grp_splitter_inn, 'jawRadius')
grp_splitter_out = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterOut')
)
self.attr_out_surface_u = libAttr.addAttr(grp_splitter_out, 'outSurfaceU')
self.attr_out_surface_v = libAttr.addAttr(grp_splitter_out, 'outSurfaceV')
self.attr_out_jaw_ratio = libAttr.addAttr(grp_splitter_out,
                                                  'outJawRatio')  # How much (in percent) this influence follows the jaw after cancellation.
#
# Connect inn and out network nodes so they can easily be found from the SplitterNode.
#
attr_inn = libAttr.addAttr(grp_splitter_inn, longName='inn', attributeType='message')
attr_out = libAttr.addAttr(grp_splitter_out, longName='out', attributeType='message')
pymel.connectAttr(self.node.message, attr_inn)
pymel.connectAttr(self.node.message, attr_out)
#
# Create node networks
# Step 1: Get the jaw displacement in uv space (parameterV only).
#
attr_jaw_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawCircumference'),
input1X=self.attr_inn_jaw_radius,
input2X=(math.pi * 2.0)
).outputX
attr_jaw_open_circle_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawOpenCircleRatio'),
operation=2, # divide
input1X=self.attr_inn_jaw_pt,
input2X=360.0
).outputX
attr_jaw_active_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawActiveCircumference'),
input1X=attr_jaw_circumference,
input2X=attr_jaw_open_circle_ratio
).outputX
attr_jaw_v_range = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getActiveJawRangeInSurfaceSpace'),
operation=2, # divide
input1X=attr_jaw_active_circumference,
input2X=self.attr_inn_surface_range_v
).outputX
#
# Step 2: Resolve the output jaw_ratio
#
# Note that this can throw a zero division warning in Maya.
# To prevent that we'll use some black-magic-ugly-ass-trick.
attr_jaw_ratio_cancelation = libRigging.create_safe_division(
self.attr_inn_surface_v,
attr_jaw_v_range,
nomenclature_rig,
'getJawRatioCancellation'
)
attr_jaw_ratio_out_raw = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawRatioOutUnlimited'),
            operation=2,  # subtraction,
input1D=(
self.attr_inn_jaw_default_ratio,
attr_jaw_ratio_cancelation
)
).output1D
attr_jaw_ratio_out_limited = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawRatioOutLimited'),
inputR=attr_jaw_ratio_out_raw,
minR=0.0,
maxR=1.0
).outputR
#
# Step 3: Resolve attr_out_surface_u & attr_out_surface_v
#
attr_inn_jaw_default_ratio_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getJawDefaultRatioInv'),
inputX=self.attr_inn_jaw_default_ratio
).outputX
util_jaw_uv_default_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawDefaultRatioUvSpace'),
input1X=self.attr_inn_jaw_default_ratio,
input1Y=attr_inn_jaw_default_ratio_inv,
input2X=attr_jaw_v_range,
input2Y=attr_jaw_v_range
)
attr_jaw_uv_default_ratio = util_jaw_uv_default_ratio.outputX
attr_jaw_uv_default_ratio_inv = util_jaw_uv_default_ratio.outputY
attr_jaw_uv_limit_max = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMax'),
            operation=2,  # subtract
input1D=(attr_jaw_v_range, attr_jaw_uv_default_ratio_inv)
).output1D
attr_jaw_uv_limit_min = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMin'),
            operation=2,  # subtract
input1D=(attr_jaw_uv_default_ratio, attr_jaw_v_range)
).output1D
attr_jaw_cancel_range = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawCancelRange'),
inputR=self.attr_inn_surface_v,
minR=attr_jaw_uv_limit_min,
maxR=attr_jaw_uv_limit_max
).outputR
attr_out_surface_v_cancelled = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getCanceledUv'),
            operation=2,  # subtraction
input1D=(self.attr_inn_surface_v, attr_jaw_cancel_range)
).output1D
#
# Connect output attributes
#
attr_inn_bypass_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getBypassInv'),
inputX=self.attr_inn_bypass
).outputX
# Connect output jaw_ratio
attr_output_jaw_ratio = libRigging.create_utility_node(
'blendWeighted',
input=(attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_jaw_ratio, self.attr_out_jaw_ratio)
# Connect output surface u
pymel.connectAttr(self.attr_inn_surface_u, self.attr_out_surface_u)
# Connect output surface_v
attr_output_surface_v = libRigging.create_utility_node(
'blendWeighted',
input=(attr_out_surface_v_cancelled, self.attr_inn_surface_v),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_surface_v, self.attr_out_surface_v)
class AvarSurfaceLipModel(model_avar_surface.AvarSurfaceModel):
"""
Custom avar model for the complex situation that is the lips.
    This ensures that we are moving according to the jaw before sliding on the surface.
"""
def __init__(self, *args, **kwargs):
super(AvarSurfaceLipModel, self).__init__(*args, **kwargs)
self._attr_inn_jaw_bindpose = None
self._attr_inn_jaw_pitch = None
self._attr_inn_jaw_ratio_default = None
self._attr_inn_bypass_splitter = None
self._attr_out_jaw_ratio = None
def _create_interface(self):
super(AvarSurfaceLipModel, self)._create_interface()
self._attr_inn_jaw_bindpose = libAttr.addAttr(self.grp_rig, 'innJawBindPose', dataType='matrix')
self._attr_inn_jaw_pitch = libAttr.addAttr(self.grp_rig, 'innJawPitch', defaultValue=0)
self._attr_inn_jaw_ratio_default = libAttr.addAttr(self.grp_rig, 'innJawRatioDefault', defaultValue=0)
self._attr_inn_bypass_splitter = libAttr.addAttr(self.grp_rig, 'innBypassSplitter')
self._attr_inn_ud_bypass = libAttr.addAttr(self.grp_rig, 'innBypassUD')
# self._attr_inn_surface_length_u = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthU', defaultValue=0)
# self._attr_inn_surface_length_v = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthV', defaultValue=0)
self._attr_out_jaw_ratio = libAttr.addAttr(self.grp_rig, 'outJawRatio')
def connect_avar(self, avar):
super(AvarSurfaceLipModel, self).connect_avar(avar)
# Note: We expect a FaceLipAvar
pymel.connectAttr(avar._attr_jaw_bind_tm, self._attr_inn_jaw_bindpose)
pymel.connectAttr(avar._attr_jaw_pitch, self._attr_inn_jaw_pitch)
pymel.connectAttr(avar._attr_inn_jaw_ratio_default, self._attr_inn_jaw_ratio_default)
pymel.connectAttr(avar._attr_bypass_splitter, self._attr_inn_bypass_splitter)
pymel.connectAttr(avar.attr_ud_bypass, self._attr_inn_ud_bypass)
def _get_follicle_relative_uv_attr(self, **kwargs):
nomenclature_rig = self.get_nomenclature_rig()
attr_u, attr_v = super(AvarSurfaceLipModel, self)._get_follicle_relative_uv_attr(**kwargs)
util_decompose_jaw_bind_tm = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=self._attr_inn_jaw_bindpose,
)
#
# Create and connect Splitter Node
#
splitter = SplitterNode()
splitter.build(
nomenclature_rig,
name=nomenclature_rig.resolve('splitter')
)
splitter.setParent(self.grp_rig)
# Resolve the radius of the jaw influence. Used by the splitter.
attr_jaw_radius = libRigging.create_utility_node(
'distanceBetween',
name=nomenclature_rig.resolve('getJawRadius'),
point1=self.grp_offset.translate,
point2=util_decompose_jaw_bind_tm.outputTranslate
).distance
# Resolve the jaw pitch. Used by the splitter.
attr_jaw_pitch = self._attr_inn_jaw_pitch
# Connect the splitter inputs
pymel.connectAttr(attr_u, splitter.attr_inn_surface_u)
pymel.connectAttr(attr_v, splitter.attr_inn_surface_v)
pymel.connectAttr(self._attr_inn_jaw_ratio_default, splitter.attr_inn_jaw_default_ratio)
pymel.connectAttr(self._attr_length_v, splitter.attr_inn_surface_range_v)
pymel.connectAttr(attr_jaw_radius, splitter.attr_inn_jaw_radius)
pymel.connectAttr(attr_jaw_pitch, splitter.attr_inn_jaw_pt)
pymel.connectAttr(self._attr_inn_bypass_splitter, splitter.attr_inn_bypass)
attr_u = splitter.attr_out_surface_u
attr_v = splitter.attr_out_surface_v
# Create constraint to controller the jaw reference
pymel.connectAttr(splitter.attr_out_jaw_ratio, self._attr_out_jaw_ratio)
#
# Implement the 'bypass' avars.
        # Those avars bypass the splitter, used in corner cases only.
#
attr_attr_ud_bypass_adjusted = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getAdjustedUdBypass'),
input1X=self._attr_inn_ud_bypass,
input2X=self.multiplier_ud
).outputX
attr_v = libRigging.create_utility_node(
'addDoubleLinear',
name=nomenclature_rig.resolve('addBypassAvar'),
input1=attr_v,
input2=attr_attr_ud_bypass_adjusted
).output
return attr_u, attr_v
| [
"omtk.libs.libRigging.create_safe_division",
"omtk.libs.libRigging.create_utility_node",
"omtk.libs.libAttr.addAttr",
"pymel.core.connectAttr"
]
| [((1698, 1745), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""innJawOpen"""'], {}), "(grp_splitter_inn, 'innJawOpen')\n", (1713, 1745), False, 'from omtk.libs import libAttr\n'), ((1972, 2020), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""innSurfaceU"""'], {}), "(grp_splitter_inn, 'innSurfaceU')\n", (1987, 2020), False, 'from omtk.libs import libAttr\n'), ((2055, 2103), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""innSurfaceV"""'], {}), "(grp_splitter_inn, 'innSurfaceV')\n", (2070, 2103), False, 'from omtk.libs import libAttr\n'), ((2198, 2250), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""innBypassAmount"""'], {}), "(grp_splitter_inn, 'innBypassAmount')\n", (2213, 2250), False, 'from omtk.libs import libAttr\n'), ((2374, 2427), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""innSurfaceRangeV"""'], {}), "(grp_splitter_inn, 'innSurfaceRangeV')\n", (2389, 2427), False, 'from omtk.libs import libAttr\n'), ((2799, 2851), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""jawDefaultRatio"""'], {}), "(grp_splitter_inn, 'jawDefaultRatio')\n", (2814, 2851), False, 'from omtk.libs import libAttr\n'), ((3013, 3059), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn', '"""jawRadius"""'], {}), "(grp_splitter_inn, 'jawRadius')\n", (3028, 3059), False, 'from omtk.libs import libAttr\n'), ((3233, 3281), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_out', '"""outSurfaceU"""'], {}), "(grp_splitter_out, 'outSurfaceU')\n", (3248, 3281), False, 'from omtk.libs import libAttr\n'), ((3316, 3364), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_out', '"""outSurfaceV"""'], {}), "(grp_splitter_out, 'outSurfaceV')\n", (3331, 3364), False, 'from omtk.libs import libAttr\n'), ((3399, 3447), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_out', '"""outJawRatio"""'], {}), "(grp_splitter_out, 'outJawRatio')\n", (3414, 3447), False, 'from omtk.libs import libAttr\n'), ((3703, 3777), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_inn'], {'longName': '"""inn"""', 'attributeType': '"""message"""'}), "(grp_splitter_inn, longName='inn', attributeType='message')\n", (3718, 3777), False, 'from omtk.libs import libAttr\n'), ((3797, 3871), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['grp_splitter_out'], {'longName': '"""out"""', 'attributeType': '"""message"""'}), "(grp_splitter_out, longName='out', attributeType='message')\n", (3812, 3871), False, 'from omtk.libs import libAttr\n'), ((3880, 3926), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self.node.message', 'attr_inn'], {}), '(self.node.message, attr_inn)\n', (3897, 3926), True, 'import pymel.core as pymel\n'), ((3935, 3981), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self.node.message', 'attr_out'], {}), '(self.node.message, attr_out)\n', (3952, 3981), True, 'import pymel.core as pymel\n'), ((5508, 5631), 'omtk.libs.libRigging.create_safe_division', 'libRigging.create_safe_division', (['self.attr_inn_surface_v', 'attr_jaw_v_range', 'nomenclature_rig', '"""getJawRatioCancellation"""'], {}), "(self.attr_inn_surface_v, attr_jaw_v_range,\n nomenclature_rig, 'getJawRatioCancellation')\n", (5539, 5631), False, 'from omtk.libs import libRigging\n'), ((8853, 8918), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_output_jaw_ratio', 'self.attr_out_jaw_ratio'], {}), '(attr_output_jaw_ratio, 
self.attr_out_jaw_ratio)\n', (8870, 8918), True, 'import pymel.core as pymel\n'), ((8963, 9030), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self.attr_inn_surface_u', 'self.attr_out_surface_u'], {}), '(self.attr_inn_surface_u, self.attr_out_surface_u)\n', (8980, 9030), True, 'import pymel.core as pymel\n'), ((9323, 9388), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_output_surface_v', 'self.attr_out_surface_v'], {}), '(attr_output_surface_v, self.attr_out_surface_v)\n', (9340, 9388), True, 'import pymel.core as pymel\n'), ((10086, 10152), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""innJawBindPose"""'], {'dataType': '"""matrix"""'}), "(self.grp_rig, 'innJawBindPose', dataType='matrix')\n", (10101, 10152), False, 'from omtk.libs import libAttr\n'), ((10188, 10248), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""innJawPitch"""'], {'defaultValue': '(0)'}), "(self.grp_rig, 'innJawPitch', defaultValue=0)\n", (10203, 10248), False, 'from omtk.libs import libAttr\n'), ((10292, 10359), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""innJawRatioDefault"""'], {'defaultValue': '(0)'}), "(self.grp_rig, 'innJawRatioDefault', defaultValue=0)\n", (10307, 10359), False, 'from omtk.libs import libAttr\n'), ((10401, 10451), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""innBypassSplitter"""'], {}), "(self.grp_rig, 'innBypassSplitter')\n", (10416, 10451), False, 'from omtk.libs import libAttr\n'), ((10487, 10531), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""innBypassUD"""'], {}), "(self.grp_rig, 'innBypassUD')\n", (10502, 10531), False, 'from omtk.libs import libAttr\n'), ((10790, 10834), 'omtk.libs.libAttr.addAttr', 'libAttr.addAttr', (['self.grp_rig', '"""outJawRatio"""'], {}), "(self.grp_rig, 'outJawRatio')\n", (10805, 10834), False, 'from omtk.libs import libAttr\n'), ((10979, 11049), 'pymel.core.connectAttr', 'pymel.connectAttr', (['avar._attr_jaw_bind_tm', 'self._attr_inn_jaw_bindpose'], {}), '(avar._attr_jaw_bind_tm, self._attr_inn_jaw_bindpose)\n', (10996, 11049), True, 'import pymel.core as pymel\n'), ((11058, 11123), 'pymel.core.connectAttr', 'pymel.connectAttr', (['avar._attr_jaw_pitch', 'self._attr_inn_jaw_pitch'], {}), '(avar._attr_jaw_pitch, self._attr_inn_jaw_pitch)\n', (11075, 11123), True, 'import pymel.core as pymel\n'), ((11132, 11222), 'pymel.core.connectAttr', 'pymel.connectAttr', (['avar._attr_inn_jaw_ratio_default', 'self._attr_inn_jaw_ratio_default'], {}), '(avar._attr_inn_jaw_ratio_default, self.\n _attr_inn_jaw_ratio_default)\n', (11149, 11222), True, 'import pymel.core as pymel\n'), ((11226, 11303), 'pymel.core.connectAttr', 'pymel.connectAttr', (['avar._attr_bypass_splitter', 'self._attr_inn_bypass_splitter'], {}), '(avar._attr_bypass_splitter, self._attr_inn_bypass_splitter)\n', (11243, 11303), True, 'import pymel.core as pymel\n'), ((11312, 11376), 'pymel.core.connectAttr', 'pymel.connectAttr', (['avar.attr_ud_bypass', 'self._attr_inn_ud_bypass'], {}), '(avar.attr_ud_bypass, self._attr_inn_ud_bypass)\n', (11329, 11376), True, 'import pymel.core as pymel\n'), ((11627, 11722), 'omtk.libs.libRigging.create_utility_node', 'libRigging.create_utility_node', (['"""decomposeMatrix"""'], {'inputMatrix': 'self._attr_inn_jaw_bindpose'}), "('decomposeMatrix', inputMatrix=self.\n _attr_inn_jaw_bindpose)\n", (11657, 11722), False, 'from omtk.libs import libRigging\n'), ((12512, 12566), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_u', 
'splitter.attr_inn_surface_u'], {}), '(attr_u, splitter.attr_inn_surface_u)\n', (12529, 12566), True, 'import pymel.core as pymel\n'), ((12575, 12629), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_v', 'splitter.attr_inn_surface_v'], {}), '(attr_v, splitter.attr_inn_surface_v)\n', (12592, 12629), True, 'import pymel.core as pymel\n'), ((12638, 12731), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self._attr_inn_jaw_ratio_default', 'splitter.attr_inn_jaw_default_ratio'], {}), '(self._attr_inn_jaw_ratio_default, splitter.\n attr_inn_jaw_default_ratio)\n', (12655, 12731), True, 'import pymel.core as pymel\n'), ((12735, 12808), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self._attr_length_v', 'splitter.attr_inn_surface_range_v'], {}), '(self._attr_length_v, splitter.attr_inn_surface_range_v)\n', (12752, 12808), True, 'import pymel.core as pymel\n'), ((12817, 12881), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_jaw_radius', 'splitter.attr_inn_jaw_radius'], {}), '(attr_jaw_radius, splitter.attr_inn_jaw_radius)\n', (12834, 12881), True, 'import pymel.core as pymel\n'), ((12890, 12949), 'pymel.core.connectAttr', 'pymel.connectAttr', (['attr_jaw_pitch', 'splitter.attr_inn_jaw_pt'], {}), '(attr_jaw_pitch, splitter.attr_inn_jaw_pt)\n', (12907, 12949), True, 'import pymel.core as pymel\n'), ((12958, 13033), 'pymel.core.connectAttr', 'pymel.connectAttr', (['self._attr_inn_bypass_splitter', 'splitter.attr_inn_bypass'], {}), '(self._attr_inn_bypass_splitter, splitter.attr_inn_bypass)\n', (12975, 13033), True, 'import pymel.core as pymel\n'), ((13194, 13266), 'pymel.core.connectAttr', 'pymel.connectAttr', (['splitter.attr_out_jaw_ratio', 'self._attr_out_jaw_ratio'], {}), '(splitter.attr_out_jaw_ratio, self._attr_out_jaw_ratio)\n', (13211, 13266), True, 'import pymel.core as pymel\n'), ((8623, 8801), 'omtk.libs.libRigging.create_utility_node', 'libRigging.create_utility_node', (['"""blendWeighted"""'], {'input': '(attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio)', 'weight': '(attr_inn_bypass_inv, self.attr_inn_bypass)'}), "('blendWeighted', input=(\n attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio), weight=(\n attr_inn_bypass_inv, self.attr_inn_bypass))\n", (8653, 8801), False, 'from omtk.libs import libRigging\n'), ((9099, 9271), 'omtk.libs.libRigging.create_utility_node', 'libRigging.create_utility_node', (['"""blendWeighted"""'], {'input': '(attr_out_surface_v_cancelled, self.attr_inn_surface_v)', 'weight': '(attr_inn_bypass_inv, self.attr_inn_bypass)'}), "('blendWeighted', input=(\n attr_out_surface_v_cancelled, self.attr_inn_surface_v), weight=(\n attr_inn_bypass_inv, self.attr_inn_bypass))\n", (9129, 9271), False, 'from omtk.libs import libRigging\n')] |
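The utility-node graph assembled in SplitterNode.build() encodes a small amount of arithmetic. As a readability aid only, the following plain-Python function restates the same data flow; it deliberately ignores the bypass blending and the safe-division guards that the real node network provides.
# Plain-Python restatement of the splitter math above (illustration only;
# the real implementation lives in the Maya node network, not in this function).
import math


def split_surface_v(surface_v, jaw_pitch_deg, jaw_radius,
                    surface_range_v, jaw_default_ratio):
    # Arc length travelled by the jaw opening, converted to surface-V units.
    jaw_circumference = 2.0 * math.pi * jaw_radius
    jaw_arc = jaw_circumference * (jaw_pitch_deg / 360.0)
    jaw_v_range = jaw_arc / surface_range_v

    # How much of the default jaw-follow ratio the avar movement cancels.
    cancellation = surface_v / jaw_v_range
    out_jaw_ratio = min(max(jaw_default_ratio - cancellation, 0.0), 1.0)

    # Clamp the avar movement to the band the jaw can account for, then
    # remove that band from what is sent to the follicle.
    limit_max = jaw_v_range - (1.0 - jaw_default_ratio) * jaw_v_range
    limit_min = jaw_default_ratio * jaw_v_range - jaw_v_range
    cancelled = min(max(surface_v, limit_min), limit_max)
    out_surface_v = surface_v - cancelled
    return out_jaw_ratio, out_surface_v


# Example: jaw opened 20 degrees, radius 3.0, surface V range 4.0,
# default jaw-follow ratio 0.5, avar pushed 0.2 along V.
print(split_surface_v(0.2, 20.0, 3.0, 4.0, 0.5))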
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
gctEnergySumsSource = cms.InputTag("gctDigis"),
gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
monitorDir = cms.untracked.string("L1T/L1TGCT"),
verbose = cms.untracked.bool(False),
stage1_layer2_ = cms.bool(False),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True),
filterTriggerType = cms.int32(1)
)
| [
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.bool"
]
| [((159, 194), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""cenJets"""'], {}), "('gctDigis', 'cenJets')\n", (171, 194), True, 'import FWCore.ParameterSet.Config as cms\n'), ((222, 257), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""forJets"""'], {}), "('gctDigis', 'forJets')\n", (234, 257), True, 'import FWCore.ParameterSet.Config as cms\n'), ((281, 316), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""tauJets"""'], {}), "('gctDigis', 'tauJets')\n", (293, 316), True, 'import FWCore.ParameterSet.Config as cms\n'), ((343, 375), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""fake"""'], {}), "('gctDigis', 'fake')\n", (355, 375), True, 'import FWCore.ParameterSet.Config as cms\n'), ((402, 426), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""'], {}), "('gctDigis')\n", (414, 426), True, 'import FWCore.ParameterSet.Config as cms\n'), ((449, 482), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""isoEm"""'], {}), "('gctDigis', 'isoEm')\n", (461, 482), True, 'import FWCore.ParameterSet.Config as cms\n'), ((507, 543), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""gctDigis"""', '"""nonIsoEm"""'], {}), "('gctDigis', 'nonIsoEm')\n", (519, 543), True, 'import FWCore.ParameterSet.Config as cms\n'), ((561, 595), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""L1T/L1TGCT"""'], {}), "('L1T/L1TGCT')\n", (581, 595), True, 'import FWCore.ParameterSet.Config as cms\n'), ((611, 636), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (629, 636), True, 'import FWCore.ParameterSet.Config as cms\n'), ((659, 674), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (667, 674), True, 'import FWCore.ParameterSet.Config as cms\n'), ((691, 715), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (709, 715), True, 'import FWCore.ParameterSet.Config as cms\n'), ((741, 765), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (759, 765), True, 'import FWCore.ParameterSet.Config as cms\n'), ((791, 803), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (800, 803), True, 'import FWCore.ParameterSet.Config as cms\n')] |
from setuptools import setup
setup(
name='modestpy',
version='0.1',
description='FMI-compliant model identification package',
url='https://github.com/sdu-cfei/modest-py',
keywords='fmi fmu optimization model identification estimation',
author='<NAME>, Center for Energy Informatics SDU',
author_email='<EMAIL>, <EMAIL>',
license='BSD',
platforms=['Windows', 'Linux'],
packages=[
'modestpy',
'modestpy.estim',
'modestpy.estim.ga_parallel',
'modestpy.estim.ga',
'modestpy.estim.ps',
'modestpy.estim.scipy',
'modestpy.fmi',
'modestpy.utilities',
'modestpy.test'],
include_package_data=True,
install_requires=[
'fmpy[complete]',
'scipy',
'pandas',
'matplotlib',
'numpy',
'pyDOE',
'modestga'
],
classifiers=[
'Programming Language :: Python :: 3'
]
)
| [
"setuptools.setup"
]
| [((30, 789), 'setuptools.setup', 'setup', ([], {'name': '"""modestpy"""', 'version': '"""0.1"""', 'description': '"""FMI-compliant model identification package"""', 'url': '"""https://github.com/sdu-cfei/modest-py"""', 'keywords': '"""fmi fmu optimization model identification estimation"""', 'author': '"""<NAME>, Center for Energy Informatics SDU"""', 'author_email': '"""<EMAIL>, <EMAIL>"""', 'license': '"""BSD"""', 'platforms': "['Windows', 'Linux']", 'packages': "['modestpy', 'modestpy.estim', 'modestpy.estim.ga_parallel',\n 'modestpy.estim.ga', 'modestpy.estim.ps', 'modestpy.estim.scipy',\n 'modestpy.fmi', 'modestpy.utilities', 'modestpy.test']", 'include_package_data': '(True)', 'install_requires': "['fmpy[complete]', 'scipy', 'pandas', 'matplotlib', 'numpy', 'pyDOE',\n 'modestga']", 'classifiers': "['Programming Language :: Python :: 3']"}), "(name='modestpy', version='0.1', description=\n 'FMI-compliant model identification package', url=\n 'https://github.com/sdu-cfei/modest-py', keywords=\n 'fmi fmu optimization model identification estimation', author=\n '<NAME>, Center for Energy Informatics SDU', author_email=\n '<EMAIL>, <EMAIL>', license='BSD', platforms=['Windows', 'Linux'],\n packages=['modestpy', 'modestpy.estim', 'modestpy.estim.ga_parallel',\n 'modestpy.estim.ga', 'modestpy.estim.ps', 'modestpy.estim.scipy',\n 'modestpy.fmi', 'modestpy.utilities', 'modestpy.test'],\n include_package_data=True, install_requires=['fmpy[complete]', 'scipy',\n 'pandas', 'matplotlib', 'numpy', 'pyDOE', 'modestga'], classifiers=[\n 'Programming Language :: Python :: 3'])\n", (35, 789), False, 'from setuptools import setup\n')] |
import discord
from discord import Embed
from discord.ext import commands  # needed for the @commands.Cog.listener() decorator below
@commands.Cog.listener()
async def on_message_delete(self, message):
    # NOTE: placeholder value; .send() at the bottom needs a real channel object,
    # e.g. one resolved with bot.get_channel(<channel id>) inside a Cog.
    channel = "xxxxxxxxxxxxxxxxxxxxx"
deleted = Embed(
description=f"Message deleted in {message.channel.mention}", color=0x4040EC
).set_author(name=message.author, url=Embed.Empty, icon_url=message.author.avatar_url)
deleted.add_field(name="Message", value=message.content)
deleted.timestamp = message.created_at
await channel.send(embed=deleted) | [
"discord.Embed"
]
| [((173, 259), 'discord.Embed', 'Embed', ([], {'description': 'f"""Message deleted in {message.channel.mention}"""', 'color': '(4210924)'}), "(description=f'Message deleted in {message.channel.mention}', color=\n 4210924)\n", (178, 259), False, 'from discord import Embed\n')] |
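The discord.py fragment above is missing its Cog class and assigns a placeholder string where a channel object is required. The sketch below shows one self-contained way the same listener could be wired up — the class name, channel-id handling and bot setup are assumptions, and it targets the discord.py 1.x API (avatar_url) that the fragment uses.
# Hedged sketch, not the original code: the same delete-log listener inside a Cog.
# The log channel id is a placeholder; the surrounding bot/token wiring is assumed.
import discord
from discord import Embed
from discord.ext import commands


class MessageLog(commands.Cog):
    def __init__(self, bot: commands.Bot, log_channel_id: int):
        self.bot = bot
        self.log_channel_id = log_channel_id

    @commands.Cog.listener()
    async def on_message_delete(self, message: discord.Message):
        channel = self.bot.get_channel(self.log_channel_id)  # a real TextChannel, not a string
        if channel is None:
            return
        deleted = Embed(
            description=f"Message deleted in {message.channel.mention}",
            color=0x4040EC,
        ).set_author(name=message.author, icon_url=message.author.avatar_url)
        deleted.add_field(name="Message", value=message.content)
        deleted.timestamp = message.created_at
        await channel.send(embed=deleted)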
import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
log.debug("clear ocsp store dir: %s" % dir)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
raise TimeoutError(f"ocsp respopnse not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial) | [
"logging.getLogger",
"re.compile",
"time.sleep",
"pytest.fail",
"copy.deepcopy",
"datetime.timedelta",
"os.path.exists",
"os.listdir",
"subprocess.run",
"inspect.getfile",
"pytest.skip",
"pyhttpd.certs.CertificateSpec",
"re.match",
"os.path.isfile",
"os.lstat",
"re.sub",
"time.time",
"os.makedirs",
"os.path.join",
"shutil.copytree",
"datetime.datetime.now",
"shutil.rmtree",
"json.load"
]
| [((454, 481), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (471, 481), False, 'import logging\n'), ((881, 912), 'os.path.join', 'os.path.join', (['our_dir', '"""pebble"""'], {}), "(our_dir, 'pebble')\n", (893, 912), False, 'import os\n'), ((937, 977), 'os.path.join', 'os.path.join', (['self.env.gen_dir', '"""pebble"""'], {}), "(self.env.gen_dir, 'pebble')\n", (949, 977), False, 'import os\n'), ((1083, 1107), 'os.listdir', 'os.listdir', (['conf_src_dir'], {}), '(conf_src_dir)\n', (1093, 1107), False, 'import os\n'), ((1654, 1665), 'time.time', 'time.time', ([], {}), '()\n', (1663, 1665), False, 'import time\n'), ((2562, 2610), 'os.path.join', 'os.path.join', (['self.gen_dir', '"""apache/acme-ca.pem"""'], {}), "(self.gen_dir, 'apache/acme-ca.pem')\n", (2574, 2610), False, 'import os\n'), ((3115, 3149), 'os.path.join', 'os.path.join', (['self.bin_dir', '"""a2md"""'], {}), "(self.bin_dir, 'a2md')\n", (3127, 3149), False, 'import os\n'), ((4495, 4533), 'os.path.join', 'os.path.join', (['self.server_dir', 'dirpath'], {}), '(self.server_dir, dirpath)\n', (4507, 4533), False, 'import os\n'), ((4865, 4937), 'subprocess.run', 'subprocess.run', (["[self._apxs, '-q', name]"], {'capture_output': '(True)', 'text': '(True)'}), "([self._apxs, '-q', name], capture_output=True, text=True)\n", (4879, 4937), False, 'import subprocess\n'), ((7367, 7398), 'os.path.exists', 'os.path.exists', (['self._store_dir'], {}), '(self._store_dir)\n', (7381, 7398), False, 'import os\n'), ((7472, 7500), 'os.makedirs', 'os.makedirs', (['self._store_dir'], {}), '(self._store_dir)\n', (7483, 7500), False, 'import os\n'), ((7993, 8030), 'os.path.join', 'os.path.join', (['self._store_dir', '"""ocsp"""'], {}), "(self._store_dir, 'ocsp')\n", (8005, 8030), False, 'import os\n'), ((8094, 8117), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (8108, 8117), False, 'import os\n'), ((8234, 8280), 'os.path.join', 'os.path.join', (['self._store_dir', '"""staging"""', 'name'], {}), "(self._store_dir, 'staging', name)\n", (8246, 8280), False, 'import os\n'), ((8289, 8309), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (8300, 8309), False, 'import os\n'), ((8427, 8473), 'os.path.join', 'os.path.join', (['self._store_dir', '"""md_store.json"""'], {}), "(self._store_dir, 'md_store.json')\n", (8439, 8473), False, 'import os\n'), ((8524, 8587), 'os.path.join', 'os.path.join', (['self._store_dir', '"""accounts"""', 'acct', '"""account.json"""'], {}), "(self._store_dir, 'accounts', acct, 'account.json')\n", (8536, 8587), False, 'import os\n'), ((8642, 8704), 'os.path.join', 'os.path.join', (['self._store_dir', '"""accounts"""', 'acct', '"""account.pem"""'], {}), "(self._store_dir, 'accounts', acct, 'account.pem')\n", (8654, 8704), False, 'import os\n'), ((8750, 8790), 'os.path.join', 'os.path.join', (['self._store_dir', '"""domains"""'], {}), "(self._store_dir, 'domains')\n", (8762, 8790), False, 'import os\n'), ((8837, 8877), 'os.path.join', 'os.path.join', (['self._store_dir', '"""archive"""'], {}), "(self._store_dir, 'archive')\n", (8849, 8877), False, 'import os\n'), ((8924, 8964), 'os.path.join', 'os.path.join', (['self._store_dir', '"""staging"""'], {}), "(self._store_dir, 'staging')\n", (8936, 8964), False, 'import os\n'), ((9013, 9056), 'os.path.join', 'os.path.join', (['self._store_dir', '"""challenges"""'], {}), "(self._store_dir, 'challenges')\n", (9025, 9056), False, 'import os\n'), ((9509, 9581), 'os.path.join', 'os.path.join', (['self._store_dir', 
'"""domains"""', 'domain', '"""fallback-pubcert.pem"""'], {}), "(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')\n", (9521, 9581), False, 'import os\n'), ((9630, 9690), 'os.path.join', 'os.path.join', (['self._store_dir', '"""staging"""', 'domain', '"""job.json"""'], {}), "(self._store_dir, 'staging', domain, 'job.json')\n", (9642, 9690), False, 'import os\n'), ((9734, 9785), 'shutil.rmtree', 'shutil.rmtree', (['self._store_dir'], {'ignore_errors': '(False)'}), '(self._store_dir, ignore_errors=False)\n', (9747, 9785), False, 'import shutil\n'), ((9794, 9831), 'shutil.copytree', 'shutil.copytree', (['src', 'self._store_dir'], {}), '(src, self._store_dir)\n', (9809, 9831), False, 'import shutil\n'), ((12875, 12896), 'copy.deepcopy', 'copy.deepcopy', (['actual'], {}), '(actual)\n', (12888, 12896), False, 'import copy\n'), ((21206, 21249), 're.compile', 're.compile', (['"""OCSP response: +([^=\\\\n]+)\\\\n"""'], {}), "('OCSP response: +([^=\\\\n]+)\\\\n')\n", (21216, 21249), False, 'import re\n'), ((21686, 21727), 're.compile', 're.compile', (['"""Verify return code:\\\\s*(.+)"""'], {}), "('Verify return code:\\\\s*(.+)')\n", (21696, 21727), False, 'import re\n'), ((828, 856), 'inspect.getfile', 'inspect.getfile', (['MDTestSetup'], {}), '(MDTestSetup)\n', (843, 856), False, 'import inspect\n'), ((993, 1022), 'os.path.exists', 'os.path.exists', (['conf_dest_dir'], {}), '(conf_dest_dir)\n', (1007, 1022), False, 'import os\n'), ((1036, 1062), 'os.makedirs', 'os.makedirs', (['conf_dest_dir'], {}), '(conf_dest_dir)\n', (1047, 1062), False, 'import os\n'), ((1132, 1164), 'os.path.join', 'os.path.join', (['conf_src_dir', 'name'], {}), '(conf_src_dir, name)\n', (1144, 1164), False, 'import os\n'), ((1181, 1212), 're.match', 're.match', (['"""(.+).template"""', 'name'], {}), "('(.+).template', name)\n", (1189, 1212), False, 'import re\n'), ((4363, 4393), 'os.path.join', 'os.path.join', (['"""state"""', 'dirpath'], {}), "('state', dirpath)\n", (4375, 4393), False, 'import os\n'), ((6614, 6656), 'pytest.skip', 'pytest.skip', ([], {'msg': '"""ACME server not running"""'}), "(msg='ACME server not running')\n", (6625, 6656), False, 'import pytest\n'), ((6886, 6943), 'pytest.fail', 'pytest.fail', ([], {'msg': '"""ACME server not running"""', 'pytrace': '(False)'}), "(msg='ACME server not running', pytrace=False)\n", (6897, 6943), False, 'import pytest\n'), ((7412, 7463), 'shutil.rmtree', 'shutil.rmtree', (['self._store_dir'], {'ignore_errors': '(False)'}), '(self._store_dir, ignore_errors=False)\n', (7425, 7463), False, 'import shutil\n'), ((7643, 7674), 'os.path.exists', 'os.path.exists', (['self._store_dir'], {}), '(self._store_dir)\n', (7657, 7674), False, 'import os\n'), ((7688, 7716), 'os.makedirs', 'os.makedirs', (['self._store_dir'], {}), '(self._store_dir)\n', (7699, 7716), False, 'import os\n'), ((8131, 8173), 'shutil.rmtree', 'shutil.rmtree', (['dirpath'], {'ignore_errors': '(True)'}), '(dirpath, ignore_errors=True)\n', (8144, 8173), False, 'import shutil\n'), ((9888, 9929), 'os.path.join', 'os.path.join', (['self._store_dir', '"""accounts"""'], {}), "(self._store_dir, 'accounts')\n", (9900, 9929), False, 'import os\n'), ((10302, 10314), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10311, 10314), False, 'import json\n'), ((12545, 12576), 'datetime.datetime.now', 'datetime.now', (['not_before.tzinfo'], {}), '(not_before.tzinfo)\n', (12557, 12576), False, 'from datetime import datetime, timedelta\n'), ((12604, 12634), 'datetime.datetime.now', 'datetime.now', (['not_after.tzinfo'], 
{}), '(not_after.tzinfo)\n', (12616, 12634), False, 'from datetime import datetime, timedelta\n'), ((13172, 13188), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (13182, 13188), False, 'import os\n'), ((16888, 16899), 'time.time', 'time.time', ([], {}), '()\n', (16897, 16899), False, 'import time\n'), ((17798, 17813), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (17808, 17813), False, 'import time\n'), ((18065, 18076), 'time.time', 'time.time', ([], {}), '()\n', (18074, 18076), False, 'import time\n'), ((18662, 18673), 'time.time', 'time.time', ([], {}), '()\n', (18671, 18673), False, 'import time\n'), ((19157, 19172), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (19167, 19172), False, 'import time\n'), ((19259, 19270), 'time.time', 'time.time', ([], {}), '()\n', (19268, 19270), False, 'import time\n'), ((19386, 19407), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (19400, 19407), False, 'import os\n'), ((19449, 19464), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (19459, 19464), False, 'import time\n'), ((20271, 20312), 'os.path.join', 'os.path.join', (['self._store_dir', '"""accounts"""'], {}), "(self._store_dir, 'accounts')\n", (20283, 20312), False, 'import os\n'), ((20352, 20399), 'os.path.join', 'os.path.join', (['self._store_dir', '"""accounts"""', 'acct'], {}), "(self._store_dir, 'accounts', acct)\n", (20364, 20399), False, 'import os\n'), ((21454, 21497), 're.compile', 're.compile', (['"""OCSP Response Status:\\\\s*(.+)"""'], {}), "('OCSP Response Status:\\\\s*(.+)')\n", (21464, 21497), False, 'import re\n'), ((21989, 22000), 'time.time', 'time.time', ([], {}), '()\n', (21998, 22000), False, 'import time\n'), ((22269, 22284), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (22279, 22284), False, 'import time\n'), ((1336, 1360), 'os.path.isfile', 'os.path.isfile', (['src_path'], {}), '(src_path)\n', (1350, 1360), False, 'import os\n'), ((3506, 3564), 'pyhttpd.certs.CertificateSpec', 'CertificateSpec', ([], {'domains': "['localhost']", 'key_type': '"""rsa2048"""'}), "(domains=['localhost'], key_type='rsa2048')\n", (3521, 3564), False, 'from pyhttpd.certs import CertificateSpec\n'), ((5528, 5573), 're.sub', 're.sub', (['"""[_]"""', '"""-"""', 'request.node.originalname'], {}), "('[_]', '-', request.node.originalname)\n", (5534, 5573), False, 'import re\n'), ((6729, 6751), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0.5)'}), '(seconds=0.5)\n', (6738, 6751), False, 'from datetime import datetime, timedelta\n'), ((7842, 7880), 'os.path.join', 'os.path.join', (['self._store_dir', 'dirpath'], {}), '(self._store_dir, dirpath)\n', (7854, 7880), False, 'import os\n'), ((13039, 13053), 'os.lstat', 'os.lstat', (['path'], {}), '(path)\n', (13047, 13053), False, 'import os\n'), ((17006, 17017), 'time.time', 'time.time', ([], {}), '()\n', (17015, 17017), False, 'import time\n'), ((17750, 17765), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (17760, 17765), False, 'import time\n'), ((18132, 18143), 'time.time', 'time.time', ([], {}), '()\n', (18141, 18143), False, 'import time\n'), ((18515, 18530), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (18525, 18530), False, 'import time\n'), ((18719, 18730), 'time.time', 'time.time', ([], {}), '()\n', (18728, 18730), False, 'import time\n'), ((19316, 19327), 'time.time', 'time.time', ([], {}), '()\n', (19325, 19327), False, 'import time\n'), ((22046, 22057), 'time.time', 'time.time', ([], {}), '()\n', (22055, 22057), False, 'import time\n'), ((2361, 2387), 
'inspect.getfile', 'inspect.getfile', (['MDTestEnv'], {}), '(MDTestEnv)\n', (2376, 2387), False, 'import inspect\n'), ((3982, 4054), 're.compile', 're.compile', (['""".*certificate with serial \\\\S+ has no OCSP responder URL.*"""'], {}), "('.*certificate with serial \\\\S+ has no OCSP responder URL.*')\n", (3992, 4054), False, 'import re\n'), ((8323, 8358), 'os.path.join', 'os.path.join', (['dirpath', '"""authz.json"""'], {}), "(dirpath, 'authz.json')\n", (8335, 8358), False, 'import os\n'), ((1400, 1433), 'os.path.join', 'os.path.join', (['conf_dest_dir', 'name'], {}), '(conf_dest_dir, name)\n', (1412, 1433), False, 'import os\n'), ((3413, 3433), 'datetime.timedelta', 'timedelta', ([], {'days': '(-100)'}), '(days=-100)\n', (3422, 3433), False, 'from datetime import datetime, timedelta\n'), ((3472, 3491), 'datetime.timedelta', 'timedelta', ([], {'days': '(-10)'}), '(days=-10)\n', (3481, 3491), False, 'from datetime import datetime, timedelta\n')] |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class uex (Exchange):
def describe(self):
return self.deep_extend(super(uex, self).describe(), {
'id': 'uex',
'name': 'UEX',
'countries': ['SG', 'US'],
'version': 'v1.0.3',
'rateLimit': 1000,
'certified': False,
# new metainfo interface
'has': {
'CORS': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'3h': '180',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1440',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/43999923-051d9884-9e1f-11e8-965a-76948cb17678.jpg',
'api': 'https://open-api.uex.com/open/api',
'www': 'https://www.uex.com',
'doc': 'https://download.uex.com/doc/UEX-API-English-1.0.3.pdf',
'fees': 'https://www.uex.com/footer/ufees.html',
'referral': 'https://www.uex.com/signup.html?code=VAGQLL',
},
'api': {
'public': {
'get': [
'common/coins', # funding limits
'common/symbols',
'get_records', # ohlcvs
'get_ticker',
'get_trades',
'market_dept', # dept here is not a typo... they mean depth
],
},
'private': {
'get': [
'deposit_address_list',
'withdraw_address_list',
'deposit_history',
'withdraw_history',
'user/account',
'market', # an assoc array of market ids to corresponding prices traded most recently(prices of last trades per market)
'order_info',
'new_order', # a list of currently open orders
'all_order',
'all_trade',
],
'post': [
'create_order',
'cancel_order',
'create_withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0010,
'taker': 0.0010,
},
},
'exceptions': {
# descriptions from ↓ exchange
# '0': 'no error', # succeed
'4': InsufficientFunds, # {"code":"4","msg":"余额不足:0E-16","data":null}
'5': InvalidOrder, # fail to order {"code":"5","msg":"Price fluctuates more than1000.0%","data":null}
'6': InvalidOrder, # the quantity value less than the minimum one {"code":"6","msg":"数量小于最小值:0.001","data":null}
'7': InvalidOrder, # the quantity value more than the maximum one {"code":"7","msg":"数量大于最大值:10000","data":null}
'8': InvalidOrder, # fail to cancel order
'9': ExchangeError, # transaction be frozen
'13': ExchangeError, # Sorry, the program made an error, please contact with the manager.
'19': InsufficientFunds, # Available balance is insufficient.
'22': OrderNotFound, # The order does not exist. {"code":"22","msg":"not exist order","data":null}
'23': InvalidOrder, # Lack of parameters of numbers of transaction
'24': InvalidOrder, # Lack of parameters of transaction price
'100001': ExchangeError, # System is abnormal
'100002': ExchangeNotAvailable, # Update System
'100004': ExchangeError, # {"code":"100004","msg":"request parameter illegal","data":null}
'100005': AuthenticationError, # {"code":"100005","msg":"request sign illegal","data":null}
'100007': PermissionDenied, # illegal IP
'110002': ExchangeError, # unknown currency code
'110003': AuthenticationError, # fund password error
'110004': AuthenticationError, # fund password error
'110005': InsufficientFunds, # Available balance is insufficient.
'110020': AuthenticationError, # Username does not exist.
'110023': AuthenticationError, # Phone number is registered.
'110024': AuthenticationError, # Email box is registered.
'110025': PermissionDenied, # Account is locked by background manager
'110032': PermissionDenied, # The user has no authority to do self operation.
'110033': ExchangeError, # fail to recharge
'110034': ExchangeError, # fail to withdraw
'-100': ExchangeError, # {"code":"-100","msg":"Your request path is not exist or you can try method GET/POST.","data":null}
'-1000': ExchangeNotAvailable, # {"msg":"System maintenancenot ","code":"-1000","data":null}
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'limits': {
'BTC/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'ETH/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'BCH/USDT': {'amount': {'min': 0.001}, 'price': {'min': 0.01}},
'ETH/BTC': {'amount': {'min': 0.001}, 'price': {'min': 0.000001}},
'BCH/BTC': {'amount': {'min': 0.001}, 'price': {'min': 0.000001}},
'LEEK/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'CTXC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'COSM/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'MANA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'LBA/BTC': {'amount': {'min': 10}, 'price': {'min': 10}},
'OLT/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'DTA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'KNT/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'REN/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'LBA/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'EXC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'ZIL/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'RATING/ETH': {'amount': {'min': 100}, 'price': {'min': 100}},
'CENNZ/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
'TTC/ETH': {'amount': {'min': 10}, 'price': {'min': 10}},
},
},
})
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.currency_to_precision(market[key], cost)),
}
async def fetch_markets(self, params={}):
response = await self.publicGetCommonSymbols()
#
# {code: "0",
# msg: "suc",
# data: [{ symbol: "btcusdt",
# count_coin: "usdt",
# amount_precision: 3,
# base_coin: "btc",
# price_precision: 2 },
# { symbol: "ethusdt",
# count_coin: "usdt",
# amount_precision: 3,
# base_coin: "eth",
# price_precision: 2 },
# { symbol: "ethbtc",
# count_coin: "btc",
# amount_precision: 3,
# base_coin: "eth",
# price_precision: 6 }]}
#
result = []
markets = response['data']
for i in range(0, len(markets)):
market = markets[i]
id = market['symbol']
baseId = market['base_coin']
quoteId = market['count_coin']
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': market['amount_precision'],
'price': market['price_precision'],
}
active = True
defaultLimits = self.safe_value(self.options['limits'], symbol, {})
limits = self.deep_extend({
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}, defaultLimits)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'info': market,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetUserAccount(params)
#
# {code: "0",
# msg: "suc",
# data: {total_asset: "0.00000000",
# coin_list: [{ normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "usdt" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "btc" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "eth" },
# { normal: "0.00000000",
# btcValuatin: "0.00000000",
# locked: "0.00000000",
# coin: "ren" }]}}
#
balances = response['data']['coin_list']
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['coin']
code = currencyId.upper()
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(code)
account = self.account()
free = float(balance['normal'])
used = float(balance['locked'])
total = self.sum(free, used)
account['free'] = free
account['used'] = used
account['total'] = total
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
response = await self.publicGetMarketDept(self.extend({
'symbol': self.market_id(symbol),
'type': 'step0', # step1, step2 from most detailed to least detailed
}, params))
#
# {code: "0",
# msg: "suc",
# data: {tick: {asks: [["0.05824200", 9.77],
# ["0.05830000", 7.81],
# ["0.05832900", 8.59],
# ["0.10000000", 0.001] ],
# bids: [["0.05780000", 8.25],
# ["0.05775000", 8.12],
# ["0.05773200", 8.57],
# ["0.00010000", 0.79] ],
# time: 1533412622463 }} }
#
timestamp = self.safe_integer(response['data']['tick'], 'time')
return self.parse_order_book(response['data']['tick'], timestamp)
def parse_ticker(self, ticker, market=None):
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
timestamp = self.safe_integer(ticker, 'time')
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
marketId = marketId.lower()
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
percentage = change * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTicker(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: {symbol: "ETHBTC",
# high: 0.058426,
# vol: 19055.875,
# last: 0.058019,
# low: 0.055802,
# change: 0.03437271,
# buy: "0.05780000",
# sell: "0.05824200",
# time: 1533413083184} }
#
return self.parse_ticker(response['data'], market)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# { amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
#
# private fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539, # only in fetchMyTrades
# ask_id: 3669583, # only in fetchMyTrades
# }
#
timestamp = self.safe_integer_2(trade, 'create_time', 'ctime')
if timestamp is None:
timestring = self.safe_string(trade, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
side = self.safe_string_2(trade, 'side', 'type')
if side is not None:
side = side.lower()
id = self.safe_string(trade, 'id')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'volume', 'amount')
cost = self.safe_float(trade, 'deal_price')
if cost is None:
if amount is not None:
if price is not None:
cost = amount * price
fee = None
feeCost = self.safe_float_2(trade, 'fee', 'deal_fee')
if feeCost is not None:
feeCurrency = self.safe_string(trade, 'feeCoin')
if feeCurrency is not None:
currencyId = feeCurrency.lower()
if currencyId in self.currencies_by_id:
feeCurrency = self.currencies_by_id[currencyId]['code']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
orderIdField = 'ask_id' if (side == 'sell') else 'bid_id'
orderId = self.safe_string(trade, orderIdField)
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetTrades(self.extend({
'symbol': market['id'],
}, params))
#
# {code: "0",
# msg: "suc",
# data: [{ amount: 0.88,
# create_time: 1533414358000,
# price: 0.058019,
# id: 406531,
# type: "sell" },
# { amount: 4.88,
# create_time: 1533414331000,
# price: 0.058019,
# id: 406530,
# type: "buy" },
# { amount: 0.5,
# create_time: 1533414311000,
# price: 0.058019,
# id: 406529,
# type: "sell" }]}
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
return [
ohlcv[0] * 1000, # timestamp
ohlcv[1], # open
ohlcv[2], # high
ohlcv[3], # low
ohlcv[4], # close
ohlcv[5], # volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe], # in minutes
}
response = await self.publicGetGetRecords(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data:
# [[1533402420, 0.057833, 0.057833, 0.057833, 0.057833, 18.1],
# [1533402480, 0.057833, 0.057833, 0.057833, 0.057833, 29.88],
# [1533402540, 0.057833, 0.057833, 0.057833, 0.057833, 29.06] ]}
#
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
await self.load_markets()
market = self.market(symbol)
orderType = '1' if (type == 'limit') else '2'
orderSide = side.upper()
amountToPrecision = self.amount_to_precision(symbol, amount)
request = {
'side': orderSide,
'type': orderType,
'symbol': market['id'],
'volume': amountToPrecision,
# An excerpt from their docs:
# side required Trading Direction
# type required pending order types,1:Limit-price Delegation 2:Market- price Delegation
# volume required
# Purchase Quantity(polysemy,multiplex field)
# type=1: Quantity of buying and selling
# type=2: Buying represents gross price, and selling represents total number
# Trading restriction user/me-user information
# price optional Delegation Price:type=2:self parameter is no use.
# fee_is_user_exchange_coin optional
# 0,when making transactions with all platform currencies,
# self parameter represents whether to use them to pay
# fees or not and 0 is no, 1 is yes.
}
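        # Illustrative example (not from the exchange docs, values made up): a
        # limit sell of 0.010 ETH/BTC at a price of 0.0582 would send roughly
        # {'side': 'SELL', 'type': '1', 'symbol': 'ethbtc', 'volume': '0.010', 'price': '0.0582'}
        # (the 'price' key is only added below for limit orders).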
priceToPrecision = None
if type == 'limit':
priceToPrecision = self.price_to_precision(symbol, price)
request['price'] = priceToPrecision
response = await self.privatePostCreateOrder(self.extend(request, params))
#
# {code: '0',
# msg: 'suc',
# data: {'order_id' : 34343} }
#
result = self.parse_order(response['data'], market)
return self.extend(result, {
'info': response,
'symbol': symbol,
'type': type,
'side': side,
'status': 'open',
'price': float(priceToPrecision),
'amount': float(amountToPrecision),
})
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privatePostCancelOrder(self.extend(request, params))
order = self.safe_value(response, 'data', {})
return self.extend(self.parse_order(order), {
'id': id,
'symbol': symbol,
'status': 'canceled',
})
def parse_order_status(self, status):
statuses = {
'0': 'open', # INIT(0,"primary order,untraded and not enter the market")
'1': 'open', # NEW_(1,"new order,untraded and enter the market ")
'2': 'closed', # FILLED(2,"complete deal")
'3': 'open', # PART_FILLED(3,"partial deal")
'4': 'canceled', # CANCELED(4,"already withdrawn")
            '5': 'canceled',  # PENDING_CANCEL(5,"pending withdrawal")
'6': 'canceled', # EXPIRED(6,"abnormal orders")
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# createOrder
#
# {"order_id":34343}
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# { side: "BUY",
# total_price: "0.10000000",
# created_at: 1510993841000,
# avg_price: "0.10000000",
# countCoin: "btc",
# source: 1,
# type: 1,
# side_msg: "买入",
# volume: "1.000",
# price: "0.10000000",
# source_msg: "WEB",
# status_msg: "完全成交",
# deal_volume: "1.00000000",
# id: 424,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "1.000",
# feeCoin: "YLB",
# price: "0.10000000",
# fee: "0.16431104",
# ctime: 1510996571195,
# deal_price: "0.10000000",
# id: 306,
# type: "买入" }],
# status: 2 }
#
# fetchOrder
#
# {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }}
#
side = self.safe_string(order, 'side')
if side is not None:
side = side.lower()
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
baseId = self.safe_string(order, 'baseCoin')
quoteId = self.safe_string(order, 'countCoin')
marketId = baseId + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
if (baseId is not None) and(quoteId is not None):
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'created_at')
if timestamp is None:
timestring = self.safe_string(order, 'created_at')
if timestring is not None:
timestamp = self.parse8601('2018-' + timestring + ':00Z')
lastTradeTimestamp = None
fee = None
average = self.safe_float(order, 'avg_price')
price = self.safe_float(order, 'price')
if price == 0:
price = average
amount = self.safe_float(order, 'volume')
filled = self.safe_float(order, 'deal_volume')
remaining = self.safe_float(order, 'remain_volume')
cost = self.safe_float(order, 'total_price')
id = self.safe_string_2(order, 'id', 'order_id')
trades = None
tradeList = self.safe_value(order, 'tradeList', [])
feeCurrencies = {}
feeCost = None
for i in range(0, len(tradeList)):
trade = self.parse_trade(tradeList[i], market)
if feeCost is None:
feeCost = 0
feeCost = feeCost + trade['fee']['cost']
tradeFeeCurrency = trade['fee']['currency']
feeCurrencies[tradeFeeCurrency] = trade['fee']['cost']
if trades is None:
trades = []
lastTradeTimestamp = trade['timestamp']
trades.append(self.extend(trade, {
'order': id,
}))
if feeCost is not None:
feeCurrency = None
keys = list(feeCurrencies.keys())
numCurrencies = len(keys)
if numCurrencies == 1:
feeCurrency = keys[0]
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
return result
async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersWithMethod() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# orderList: [{ side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }]} }
#
# privateGetNewOrder returns resultList, privateGetAllOrder returns orderList
orders = self.safe_value_2(response['data'], 'orderList', 'resultList', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetNewOrder', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetAllOrder', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = await self.privateGetOrderInfo(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {trade_list: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# order_info: { side: "SELL",
# total_price: "0.010",
# created_at: 1533616673000,
# avg_price: "0.05816200",
# countCoin: "btc",
# source: 3,
# type: 2,
# side_msg: "卖出",
# volume: "0.010",
# price: "0.00000000",
# source_msg: "API",
# status_msg: "完全成交",
# deal_volume: "0.01000000",
# id: 3669583,
# remain_volume: "0.00000000",
# baseCoin: "eth",
# tradeList: [{ volume: "0.010",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出" }],
# status: 2 }} }
#
return self.parse_order(response['data']['order_info'], market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# pageSize optional page size
# page optional page number
'symbol': market['id'],
}
if limit is not None:
request['pageSize'] = limit
response = await self.privateGetAllTrade(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: { count: 1,
# resultList: [{ volume: "0.010",
# side: "SELL",
# feeCoin: "BTC",
# price: "0.05816200",
# fee: "0.00000029",
# ctime: 1533616674000,
# deal_price: "0.00058162",
# id: 415779,
# type: "卖出",
# bid_id: 3669539,
# ask_id: 3669583 }]} }
#
trades = self.safe_value(response['data'], 'resultList', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-address-of-assigned-token
response = await self.privateGetDepositAddressList(self.extend(request, params))
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "addressList": [
# {
# "address": "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# "tag": "",
# },
# ],
# },
# }
#
data = self.safe_value(response, 'data')
if data is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no data')
addressList = self.safe_value(data, 'addressList')
if addressList is None:
raise InvalidAddress(self.id + ' privateGetDepositAddressList() returned no address list')
numAddresses = len(addressList)
if numAddresses < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
firstAddress = addressList[0]
address = self.safe_string(firstAddress, 'address')
tag = self.safe_string(firstAddress, 'tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_transactions_by_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
currency = self.currency(code)
request = {
'coin': currency['id'],
}
if limit is not None:
request['pageSize'] = limit # default 10
transactionType = 'deposit' if (type == 'deposit') else 'withdraw' # instead of withdrawal...
method = 'privateGet' + self.capitalize(transactionType) + 'History'
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-deposit-record-of-assigned-token
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Query-withdraw-record-of-assigned-token
response = await getattr(self, method)(self.extend(request, params))
#
# {code: "0",
# msg: "suc",
# data: {depositList: [{ createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# {
# "code": "0",
# "msg": "suc",
# "data": {
# "withdrawList": [{
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }]
# }
# }
#
transactions = self.safe_value(response['data'], transactionType + 'List')
return self.parse_transactions_by_type(type, transactions, code, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('deposit', code, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_by_type('withdrawal', code, since, limit, params)
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filterByCurrencySinceLimit(result, code, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# { createdAt: 1533615955000,
# amount: "0.01",
# updateAt: 1533616311000,
# txid: "0x0922fde6ab8270fe6eb31cb5a37dc732d96dc8193f81cf46c4ab29fde…",
# tag: "",
# confirmations: 30,
# addressTo: "0x198803ef8e0df9e8812c0105421885e843e6d2e2",
# status: 1,
# coin: "ETH" }]} }
#
# withdrawals
#
# {
# "updateAt": 1540344965000,
# "createdAt": 1539311971000,
# "status": 0,
# "addressTo": "tz1d7DXJXU3AKWh77gSmpP7hWTeDYs8WF18q",
# "tag": "100128877",
# "id": 5,
# "txid": "",
# "fee": 0.0,
# "amount": "1",
# "symbol": "XTZ"
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_integer(transaction, 'createdAt')
updated = self.safe_integer(transaction, 'updateAt')
code = None
currencyId = self.safe_string_2(transaction, 'symbol', 'coin')
currency = self.safe_value(self.currencies_by_id, currencyId)
if currency is not None:
code = currency['code']
else:
code = self.common_currency_code(currencyId)
address = self.safe_string(transaction, 'addressTo')
tag = self.safe_string(transaction, 'tag')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_float(transaction, 'fee')
if (type == 'deposit') and(feeCost is None):
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': tag,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transaction_status(self, status):
statuses = {
'0': 'pending', # unaudited
'1': 'ok', # audited
'2': 'failed', # audit failed
'3': 'pending', # "payment"
'4': 'failed', # payment failed
'5': 'ok',
'6': 'canceled',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
fee = self.safe_float(params, 'fee')
if fee is None:
            raise ArgumentsRequired(self.id + ' requires a "fee" extra parameter in its last argument')
self.check_address(address)
currency = self.currency(code)
request = {
'coin': currency['id'],
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'fee': fee, # balance >= self.sum(amount, fee)
}
if tag is not None:
request['tag'] = tag
# https://github.com/UEX-OpenAPI/API_Docs_en/wiki/Withdraw
response = await self.privatePostCreateWithdraw(self.extend(request, params))
id = None
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.seconds())
auth = ''
query = self.keysort(self.extend(params, {
'api_key': self.apiKey,
'time': timestamp,
}))
keys = list(query.keys())
for i in range(0, len(keys)):
key = keys[i]
auth += key
auth += str(query[key])
signature = self.hash(self.encode(auth + self.secret))
if query:
if method == 'GET':
url += '?' + self.urlencode(query) + '&sign=' + signature
else:
body = self.urlencode(query) + '&sign=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
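    # Illustrative walk-through of the signing scheme above (made-up values):
    # for params {'symbol': 'ethbtc'} with api_key 'KEY' at time '1533413083',
    # keysort orders the keys as api_key, symbol, time, so the string that is
    # hashed is 'api_keyKEYsymbolethbtctime1533413083' + secret, and the
    # resulting digest is appended as the extra 'sign' parameter.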
def handle_errors(self, httpCode, reason, url, method, headers, body, response):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"code":"0","msg":"suc","data":{}}
#
code = self.safe_string(response, 'code')
# message = self.safe_string(response, 'msg')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code != '0':
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| [
"json.loads",
"ccxt.base.errors.InvalidOrder",
"ccxt.base.errors.ArgumentsRequired",
"ccxt.base.errors.InvalidAddress",
"ccxt.base.errors.ExchangeError"
]
| [((33242, 33328), 'ccxt.base.errors.ArgumentsRequired', 'ArgumentsRequired', (["(self.id + ' fetchOrdersWithMethod() requires a symbol argument')"], {}), "(self.id +\n ' fetchOrdersWithMethod() requires a symbol argument')\n", (33259, 33328), False, 'from ccxt.base.errors import ArgumentsRequired\n'), ((39101, 39173), 'ccxt.base.errors.ArgumentsRequired', 'ArgumentsRequired', (["(self.id + ' fetchMyTrades requires a symbol argument')"], {}), "(self.id + ' fetchMyTrades requires a symbol argument')\n", (39118, 39173), False, 'from ccxt.base.errors import ArgumentsRequired\n'), ((41385, 41461), 'ccxt.base.errors.InvalidAddress', 'InvalidAddress', (["(self.id + ' privateGetDepositAddressList() returned no data')"], {}), "(self.id + ' privateGetDepositAddressList() returned no data')\n", (41399, 41461), False, 'from ccxt.base.errors import InvalidAddress\n'), ((41571, 41659), 'ccxt.base.errors.InvalidAddress', 'InvalidAddress', (["(self.id + ' privateGetDepositAddressList() returned no address list')"], {}), "(self.id +\n ' privateGetDepositAddressList() returned no address list')\n", (41585, 41659), False, 'from ccxt.base.errors import InvalidAddress\n'), ((41743, 41828), 'ccxt.base.errors.InvalidAddress', 'InvalidAddress', (["(self.id + ' privatePostDepositAddresses() returned no addresses')"], {}), "(self.id + ' privatePostDepositAddresses() returned no addresses'\n )\n", (41757, 41828), False, 'from ccxt.base.errors import InvalidAddress\n'), ((42297, 42383), 'ccxt.base.errors.ArgumentsRequired', 'ArgumentsRequired', (["(self.id + ' fetchWithdrawals requires a currency code argument')"], {}), "(self.id +\n ' fetchWithdrawals requires a currency code argument')\n", (42314, 42383), False, 'from ccxt.base.errors import ArgumentsRequired\n'), ((48625, 48713), 'ccxt.base.errors.ArgumentsRequired', 'ArgumentsRequired', (['(self.id + \'requires a "fee" extra parameter in its last argument\')'], {}), '(self.id +\n \'requires a "fee" extra parameter in its last argument\')\n', (48642, 48713), False, 'from ccxt.base.errors import ArgumentsRequired\n'), ((50892, 50908), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (50902, 50908), False, 'import json\n'), ((51366, 51389), 'ccxt.base.errors.ExchangeError', 'ExchangeError', (['feedback'], {}), '(feedback)\n', (51379, 51389), False, 'from ccxt.base.errors import ExchangeError\n'), ((22721, 23165), 'ccxt.base.errors.InvalidOrder', 'InvalidOrder', (['(self.id +\n " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options[\'createMarketBuyOrderRequiresPrice\'] = False to supply the cost in the amount argument(the exchange-specific behaviour)"\n )'], {}), '(self.id +\n " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options[\'createMarketBuyOrderRequiresPrice\'] = False to supply the cost in the amount argument(the exchange-specific behaviour)"\n )\n', (22733, 23165), False, 'from ccxt.base.errors import InvalidOrder\n')] |
# Generated by Django 3.0.8 on 2020-07-11 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsapp', '0002_auto_20200711_1124'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('indian_news', models.TextField()),
('national_news', models.TextField()),
('international_news', models.TextField()),
('bollywood_news', models.TextField()),
('lifestyle_news', models.TextField()),
('sport_news', models.TextField()),
('business_news', models.TextField()),
('sharemarket_news', models.TextField()),
('corona_news', models.TextField()),
('space_news', models.TextField()),
('motivation_news', models.TextField()),
],
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.TextField"
]
| [((328, 421), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (344, 421), False, 'from django.db import migrations, models\n'), ((445, 467), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (465, 467), False, 'from django.db import migrations, models\n'), ((502, 520), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (518, 520), False, 'from django.db import migrations, models\n'), ((557, 575), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (573, 575), False, 'from django.db import migrations, models\n'), ((617, 635), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (633, 635), False, 'from django.db import migrations, models\n'), ((673, 691), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (689, 691), False, 'from django.db import migrations, models\n'), ((729, 747), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (745, 747), False, 'from django.db import migrations, models\n'), ((781, 799), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (797, 799), False, 'from django.db import migrations, models\n'), ((836, 854), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (852, 854), False, 'from django.db import migrations, models\n'), ((894, 912), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (910, 912), False, 'from django.db import migrations, models\n'), ((947, 965), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (963, 965), False, 'from django.db import migrations, models\n'), ((999, 1017), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1015, 1017), False, 'from django.db import migrations, models\n'), ((1056, 1074), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1072, 1074), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
from enum import Enum
from typing import TypeVar, Type, List, Iterable, cast
from faker.providers import BaseProvider
TEnum = TypeVar("TEnum", bound=Enum)
class EnumProvider(BaseProvider):
"""
A Provider for enums.
"""
def enum(self, enum_cls: Type[TEnum]) -> TEnum:
members: List[TEnum] = list(cast(Iterable[TEnum], enum_cls))
return self.random_element(members)
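# Usage sketch (illustrative, not part of the provider itself):
#     from faker import Faker
#     class Color(Enum):
#         RED = 1
#         GREEN = 2
#     fake = Faker()
#     fake.add_provider(EnumProvider)
#     fake.enum(Color)  # -> a random Color member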
| [
"typing.cast",
"typing.TypeVar"
]
| [((152, 180), 'typing.TypeVar', 'TypeVar', (['"""TEnum"""'], {'bound': 'Enum'}), "('TEnum', bound=Enum)\n", (159, 180), False, 'from typing import TypeVar, Type, List, Iterable, cast\n'), ((349, 380), 'typing.cast', 'cast', (['Iterable[TEnum]', 'enum_cls'], {}), '(Iterable[TEnum], enum_cls)\n', (353, 380), False, 'from typing import TypeVar, Type, List, Iterable, cast\n')] |
# Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app
import bottle
import ujson
from bottle import route, run
@route("/")
def index():
return ujson.dumps({"test": True})
app = bottle.default_app()
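if __name__ == "__main__":
    # Fallback for quick local testing without gunicorn (illustrative);
    # uses the `run` imported from bottle above.
    run(app, host="127.0.0.1", port=8000)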
| [
"ujson.dumps",
"bottle.default_app",
"bottle.route"
]
| [((170, 180), 'bottle.route', 'route', (['"""/"""'], {}), "('/')\n", (175, 180), False, 'from bottle import route, run\n'), ((241, 261), 'bottle.default_app', 'bottle.default_app', ([], {}), '()\n', (259, 261), False, 'import bottle\n'), ((205, 232), 'ujson.dumps', 'ujson.dumps', (["{'test': True}"], {}), "({'test': True})\n", (216, 232), False, 'import ujson\n')] |
from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash, # type: bytes
private_key # type: _PrivateKey
):
# type: (...) -> _Signature
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `eth_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash, # type: bytes
signature, # type: _Signature
public_key # type: _PublicKey
) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash, # type: bytes
signature # type: _Signature
):
# type: (...) -> _PublicKey
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `eth_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key):
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
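# Usage sketch (illustrative; assumes a 32-byte secret `key_bytes` and a
# 32-byte `message_hash`):
#     priv = lazy_key_api.PrivateKey(key_bytes)
#     sig = lazy_key_api.ecdsa_sign(message_hash, priv)
#     assert lazy_key_api.ecdsa_verify(message_hash, sig, priv.public_key)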
| [
"eth_keys.exceptions.ValidationError",
"eth_keys.validation.validate_message_hash"
]
| [((912, 947), 'eth_keys.validation.validate_message_hash', 'validate_message_hash', (['message_hash'], {}), '(message_hash)\n', (933, 947), False, 'from eth_keys.validation import validate_message_hash\n'), ((2154, 2189), 'eth_keys.validation.validate_message_hash', 'validate_message_hash', (['message_hash'], {}), '(message_hash)\n', (2175, 2189), False, 'from eth_keys.validation import validate_message_hash\n'), ((1018, 1114), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"""'], {}), "(\n 'The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`')\n", (1033, 1114), False, 'from eth_keys.exceptions import ValidationError\n'), ((1278, 1413), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""Backend returned an invalid signature. Return value must be an instance of `eth_keys.datatypes.Signature`"""'], {}), "(\n 'Backend returned an invalid signature. Return value must be an instance of `eth_keys.datatypes.Signature`'\n )\n", (1293, 1413), False, 'from eth_keys.exceptions import ValidationError\n'), ((1760, 1854), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"""'], {}), "(\n 'The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`')\n", (1775, 1854), False, 'from eth_keys.exceptions import ValidationError\n'), ((2257, 2350), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""The `signature` must be an instance of `eth_keys.datatypes.Signature`"""'], {}), "(\n 'The `signature` must be an instance of `eth_keys.datatypes.Signature`')\n", (2272, 2350), False, 'from eth_keys.exceptions import ValidationError\n'), ((2518, 2654), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""Backend returned an invalid public_key. Return value must be an instance of `eth_keys.datatypes.PublicKey`"""'], {}), "(\n 'Backend returned an invalid public_key. Return value must be an instance of `eth_keys.datatypes.PublicKey`'\n )\n", (2533, 2654), False, 'from eth_keys.exceptions import ValidationError\n'), ((2845, 2941), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"""'], {}), "(\n 'The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`')\n", (2860, 2941), False, 'from eth_keys.exceptions import ValidationError\n'), ((3108, 3244), 'eth_keys.exceptions.ValidationError', 'ValidationError', (['"""Backend returned an invalid public_key. Return value must be an instance of `eth_keys.datatypes.PublicKey`"""'], {}), "(\n 'Backend returned an invalid public_key. Return value must be an instance of `eth_keys.datatypes.PublicKey`'\n )\n", (3123, 3244), False, 'from eth_keys.exceptions import ValidationError\n')] |
from oacensus.scraper import Scraper
from oacensus.commands import defaults
class TestScraper(Scraper):
"""
Scraper for testing scraper methods.
"""
aliases = ['testscraper']
def scrape(self):
pass
def process(self):
pass
def test_hashcode():
scraper = Scraper.create_instance('testscraper', defaults)
assert len(scraper.hashcode()) == 32
def test_run():
scraper = Scraper.create_instance('testscraper', defaults)
scraper.run()
| [
"oacensus.scraper.Scraper.create_instance"
]
| [((301, 349), 'oacensus.scraper.Scraper.create_instance', 'Scraper.create_instance', (['"""testscraper"""', 'defaults'], {}), "('testscraper', defaults)\n", (324, 349), False, 'from oacensus.scraper import Scraper\n'), ((422, 470), 'oacensus.scraper.Scraper.create_instance', 'Scraper.create_instance', (['"""testscraper"""', 'defaults'], {}), "('testscraper', defaults)\n", (445, 470), False, 'from oacensus.scraper import Scraper\n')] |
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
client = ModbusClient('192.168.178.61', port=502)
client.connect()
f = client.read_holding_registers(305,1)
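# read_holding_registers returns a response object whose `.registers` attribute
# holds the values read; a more defensive script would check f.isError() before
# printing and call client.close() when done.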
print(f.registers) | [
"logging.basicConfig",
"pymodbus.client.sync.ModbusTcpClient",
"logging.getLogger"
]
| [((194, 228), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (213, 228), False, 'import logging\n'), ((235, 254), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (252, 254), False, 'import logging\n'), ((292, 332), 'pymodbus.client.sync.ModbusTcpClient', 'ModbusClient', (['"""192.168.178.61"""'], {'port': '(502)'}), "('192.168.178.61', port=502)\n", (304, 332), True, 'from pymodbus.client.sync import ModbusTcpClient as ModbusClient\n')] |
import unittest
from selenium import webdriver
import page
class AboutPage(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get("http://nicolesmith.nyc")
#self.driver.get("http://127.0.0.1:4747/about")
self.about_page = page.AboutPage(self.driver)
######## HEADER STUFF ########
def test_title_on_about_page(self):
assert self.about_page.is_title_matches(), "about page title doesn't match"
def test_click_get_quote(self):
assert self.about_page.click_quote_button(), "link to contact page is broken"
def test_click_home_button(self):
assert self.about_page.click_home_button(), "home button does not go to homepage"
@unittest.skip("Needs fixing.")
def test_click_about_link(self):
assert self.about_page.click_projects_link(), "about link does not go to about page"
@unittest.skip("Needs fixing.")
def test_click_projects_link(self):
assert self.about_page.click_projects_link(), "projects link does not go to projects page"
@unittest.skip("Needs fixing.")
def test_click_services_link(self):
assert self.about_page.click_projects_link(), "services link does not go to services page"
######## PAGE SPECIFIC STUFF ########
def test_click_resume(self):
        assert self.about_page.click_resume(), "link to resume is broken"
def test_click_resumator(self):
        assert self.about_page.click_resumator(), "link to resumator is broken"
def test_click_contact_me(self):
        assert self.about_page.click_contact_me(), "link to contact me page is broken in FAQ"
def test_click_html5up_backlink(self):
        assert self.about_page.click_html5up_backlink(), "backlink to html5up in FAQ is broken"
######## FOOTER STUFF ########
def test_click_github(self):
assert self.about_page.click_github_button(), "link to github is broken"
def test_click_linkedin(self):
assert self.about_page.click_linkedin_button(), "link to linkedin is broken"
def test_click_gplus(self):
assert self.about_page.click_gplus_button(), "link to google plus is broken"
def test_click_twitter(self):
assert self.about_page.click_twitter_button(), "link to twitter is broken"
def test_click_html5up(self):
assert self.about_page.click_html5up_link(), "link to html5up template owner is broken"
def test_copyright_on_about_page(self):
assert self.about_page.is_copyright_matches(), "about page has wrong copyright"
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"unittest.main",
"unittest.skip",
"selenium.webdriver.Firefox",
"page.AboutPage"
]
| [((739, 769), 'unittest.skip', 'unittest.skip', (['"""Needs fixing."""'], {}), "('Needs fixing.')\n", (752, 769), False, 'import unittest\n'), ((906, 936), 'unittest.skip', 'unittest.skip', (['"""Needs fixing."""'], {}), "('Needs fixing.')\n", (919, 936), False, 'import unittest\n'), ((1082, 1112), 'unittest.skip', 'unittest.skip', (['"""Needs fixing."""'], {}), "('Needs fixing.')\n", (1095, 1112), False, 'import unittest\n'), ((2649, 2664), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2662, 2664), False, 'import unittest\n'), ((140, 159), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (157, 159), False, 'from selenium import webdriver\n'), ((292, 319), 'page.AboutPage', 'page.AboutPage', (['self.driver'], {}), '(self.driver)\n', (306, 319), False, 'import page\n')] |
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
async def read_a_file_or_something():
await asyncio.sleep(0)
return "here's a file"
def assert_eq(l, r):
assert l == r
class FileResource(CustomResource):
contents: Output[str]
def __init__(self, name: str, file_contents: Input[str]) -> None:
CustomResource.__init__(self, "test:index:FileResource", name, {
"contents": file_contents
})
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
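# The resolved value could also be exposed as a stack output (illustrative;
# this would additionally require `import pulumi`):
#     pulumi.export("contents", file_res.contents)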
| [
"pulumi.CustomResource.__init__",
"asyncio.sleep"
]
| [((702, 718), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (715, 718), False, 'import asyncio\n'), ((928, 1023), 'pulumi.CustomResource.__init__', 'CustomResource.__init__', (['self', '"""test:index:FileResource"""', 'name', "{'contents': file_contents}"], {}), "(self, 'test:index:FileResource', name, {'contents':\n file_contents})\n", (951, 1023), False, 'from pulumi import CustomResource, Output, Input\n')] |
import copy
import json
from ghcl.models.pull_request import PullRequest
class PRData:
def __init__(self, data: dict = None):
if data is None:
with open('./tests/models/empty_pr_data.json') as file:
self._data = json.load(file)
else:
self._data = data
def with_pr_url(self, url: str = 'some-url'):
data = copy.deepcopy(self._data)
data['issues_data']['pull_request']['html_url'] = url
return PRData(data)
def with_label(self, label_to_add: str = None):
data = copy.deepcopy(self._data)
if label_to_add is None:
label_number = len(data["issues_data"]["labels"]) + 1
label_to_add = f'label-{label_number}'
data['issues_data']['labels'].append({'name': label_to_add})
return PRData(data)
def with_created_at(self, created_at: str = '2014-04-24T16:34:47Z'):
data = copy.deepcopy(self._data)
data['issues_data']['created_at'] = created_at
return PRData(data)
def with_owner(self, owner: str = 'owner_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['base']['repo']['owner']['login'] = owner
return PRData(data)
def with_pr_raised_by(self, pr_raised_by: str = 'pr_raised_by_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['head']['user']['login'] = pr_raised_by
return PRData(data)
def with_merged(self, merged=False):
data = copy.deepcopy(self._data)
data['pr_data']['merged'] = merged
return PRData(data)
def with_state(self, state='some_state'):
data = copy.deepcopy(self._data)
data['issues_data']['state'] = state
return PRData(data)
def with_defaults(self):
return PRData(self._data).with_pr_url()\
.with_label()\
.with_label()\
.with_created_at()\
.with_owner()\
.with_pr_raised_by()\
.with_merged()\
.with_state()
def as_pull_request(self):
return PullRequest(**self._data)
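# Usage sketch in a test (illustrative):
#     pr = PRData().with_defaults().with_state('closed').as_pull_request()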
| [
"json.load",
"ghcl.models.pull_request.PullRequest",
"copy.deepcopy"
]
| [((381, 406), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (394, 406), False, 'import copy\n'), ((565, 590), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (578, 590), False, 'import copy\n'), ((928, 953), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (941, 953), False, 'import copy\n'), ((1109, 1134), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (1122, 1134), False, 'import copy\n'), ((1322, 1347), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (1335, 1347), False, 'import copy\n'), ((1497, 1522), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (1510, 1522), False, 'import copy\n'), ((1656, 1681), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (1669, 1681), False, 'import copy\n'), ((2082, 2107), 'ghcl.models.pull_request.PullRequest', 'PullRequest', ([], {}), '(**self._data)\n', (2093, 2107), False, 'from ghcl.models.pull_request import PullRequest\n'), ((255, 270), 'json.load', 'json.load', (['file'], {}), '(file)\n', (264, 270), False, 'import json\n')] |
# Copyright (c) AT&T 2012-2013 <NAME> <<EMAIL>>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self):
super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
self.servicegroup_api.join(service_id['host'], service_id['topic'])
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
self.servicegroup_api.leave(service_id['host'], service_id['topic'])
# make sure zookeeper is updated and watcher is triggered
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
def test_stop(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
pulse = self.servicegroup_api.join(service_id['host'],
service_id['topic'], None)
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
pulse.stop()
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| [
"nova.servicegroup.API",
"eventlet.sleep",
"nova.servicegroup.drivers.zk.ZooKeeperDriver"
]
| [((1656, 1674), 'nova.servicegroup.API', 'servicegroup.API', ([], {}), '()\n', (1672, 1674), False, 'from nova import servicegroup\n'), ((2038, 2055), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (2052, 2055), False, 'import eventlet\n'), ((2188, 2206), 'nova.servicegroup.API', 'servicegroup.API', ([], {}), '()\n', (2204, 2206), False, 'from nova import servicegroup\n'), ((2503, 2520), 'eventlet.sleep', 'eventlet.sleep', (['(1)'], {}), '(1)\n', (2517, 2520), False, 'import eventlet\n'), ((1474, 1494), 'nova.servicegroup.drivers.zk.ZooKeeperDriver', 'zk.ZooKeeperDriver', ([], {}), '()\n', (1492, 1494), False, 'from nova.servicegroup.drivers import zk\n')] |
"""Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
| [
"chopsticks.tunnel.Docker"
]
| [((223, 257), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (229, 257), False, 'from chopsticks.tunnel import Docker\n'), ((387, 421), 'chopsticks.tunnel.Docker', 'Docker', (['"""py35"""'], {'image': '"""python:3.5"""'}), "('py35', image='python:3.5')\n", (393, 421), False, 'from chopsticks.tunnel import Docker\n'), ((431, 465), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (437, 465), False, 'from chopsticks.tunnel import Docker\n'), ((624, 658), 'chopsticks.tunnel.Docker', 'Docker', (['"""py35"""'], {'image': '"""python:3.5"""'}), "('py35', image='python:3.5')\n", (630, 658), False, 'from chopsticks.tunnel import Docker\n'), ((668, 702), 'chopsticks.tunnel.Docker', 'Docker', (['"""py36"""'], {'image': '"""python:3.6"""'}), "('py36', image='python:3.6')\n", (674, 702), False, 'from chopsticks.tunnel import Docker\n')] |
import turtle
import random
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
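# Race loop: each turn both turtles roll the die and move forward 20*roll steps; the first to reach x=300 wins.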
while(i <= 20):
if p1.pos() >= (300,100):
print("p1 wins")
break
elif p2.pos() >= (300,-100):
print("p2 wins")
break
else:
p1_turn=input("press enter to start")
die_out=random.choice(die)
print("you get", die_out)
print("the number of steps:", 20*die_out)
p1.forward(20*die_out)
p2_turn=input("press enter to challenge")
d=random.choice(die)
print("you get",d)
print("the number os steps:",20*d)
        p2.forward(20*d)
        i = i + 1 | [
"random.choice",
"turtle.Turtle"
]
| [((32, 47), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (45, 47), False, 'import turtle\n'), ((589, 607), 'random.choice', 'random.choice', (['die'], {}), '(die)\n', (602, 607), False, 'import random\n'), ((792, 810), 'random.choice', 'random.choice', (['die'], {}), '(die)\n', (805, 810), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__ is a built-in variable; when this file (main_fed.py) is run directly it defaults to __main__.
# But when another .py file imports main_fed.py, __name__ inside main_fed.py evaluates to the module name (main_fed) instead.
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu))
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rank = 0
device_id = rank
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
# if torch.cuda.is_available() and args.gpu != -1 else 'cpu'
# load dataset and split users
if args.dataset == 'mnist':
        # ToTensor(): scales the data to (0, 1); Normalize(): (data - 0.1307) / 0.3081, spreading the distribution to roughly (-1, 1)
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
if trans_mnist is not None:
print(1)
print(trans_mnist)
        # training set (60000) and test set (10000)
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
        # non-IID data
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# print('df ',img_size) [1,28,28]
# build model
# print(args.model)
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
            # print('x value', x)
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
# add
control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
    # put the model in training mode
net_glob.train()
print(net_glob)
control_weights =control_global.state_dict()
# copy weights
    # initialize the global weights
w_glob = net_glob.state_dict()
c_glob = copy.deepcopy(net_glob.state_dict())
# print(w_glob)
# training
loss_train = []
accuracy = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
count = 0, 0
test_acc_list = []
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
# add
else:
        # initialize the local weights (per-client control variates)
c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
range(args.num_users)]
for net in c_local:
net.load_state_dict(control_weights)
delta_c = copy.deepcopy(net_glob.state_dict())
# delta_x = copy.deepcopy(net_glob.state_dict())
# with open("test.txt", "w") as f:
# for i in range(0, len(c_local)):
# for k,v in c_local[i].state_dict().items():
# f.write(f"{k},{v}\n".format(k,v))
# with open("test.txt", "a") as f:
# for i in range(0, len(c_local)):
# for k, v in w_locals[i].items():
# f.write(f"{k},{v}\n".format(k, v))
    # add: initialize the deltas
# print("why?")
for iter in range(args.epochs):
        # reset the control variates
for i in delta_c:
delta_c[i] = 0.0
# for i in delta_x:
# delta_x[i] = 0.0
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
        # randomly pick m "lucky" clients each round
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
            # SGD with momentum
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(
                net=copy.deepcopy(net_glob).to(args.device), control_local=c_local[idx],
                control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
# add
if iter != 0:
c_local[idx].load_state_dict(control_local_w)
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
# add
loss_locals.append(copy.deepcopy(loss))
# add
for i in delta_c:
if iter != 0:
delta_c[i] += w[i]
else:
delta_c[i] += local_delta_c[i]
# delta_x[i] += local_delta[i]
# add
# update the delta C
for i in delta_c:
delta_c[i] /= m
# delta_x[i] /= m
# update global weights
w_glob = FedAvg(w_locals)
        # add: update the global c and w
# w_glob = net_glob.state_dict()
control_global_w = control_global.state_dict()
for i in control_global_w:
if iter !=0:
# w_glob[i] = delta_x[i]
# else:
# w_glob[i] += delta_x[i]
control_global_w[i] += (m / args.num_users) * delta_c[i]
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# add
control_global.load_state_dict(control_global_w)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
loss_train.append(loss_avg)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
accuracy.append(acc_test)
# add
for c in range(args.num_users):
local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
torch.cuda.empty_cache()
# net_glob.eval()
# print("Training accuracy: {:.2f}".format(acc_train))
# print("Testing accuracy: {:.2f}".format(acc_test))
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
# Fedavg
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_globF = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_globF = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_globF)
net_globF.train()
# copy weights
w_globF = net_globF.state_dict()
# training
loss_trainF = []
accuracyF = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.all_clients:
print("Aggregation over all clients")
w_localsF = [w_globF for i in range(args.num_users)]
for iter in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_localsF = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
if args.all_clients:
w_localsF[idx] = copy.deepcopy(w)
else:
w_localsF.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_globF = FedAvg(w_localsF)
# copy weight to net_globF
net_globF.load_state_dict(w_globF)
# print loss
loss_avgF = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
loss_trainF.append(loss_avgF)
acc_test, loss_test = test_img(net_globF, dataset_test, args)
accuracyF.append(acc_test)
# plot loss curve
plt.figure()
print(loss_train, loss_trainF)
plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1)
plt.ylabel('train_loss')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
# plot loss curve
plt.figure()
# plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
plt.ylabel('test_acc')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
| [
"matplotlib.pyplot.ylabel",
"torch.distributed.init_process_group",
"copy.deepcopy",
"utils.sampling.cifar_iid",
"matplotlib.pyplot.xlabel",
"utils.sampling.mnist_noniid",
"torchvision.transforms.ToTensor",
"models.Fed.FedAvg",
"models.Nets.CNNCifar",
"matplotlib.use",
"models.Update.LocalUpdateF",
"models.Nets.CNNMnist",
"torchvision.transforms.Normalize",
"torchvision.datasets.CIFAR10",
"torch.cuda.empty_cache",
"matplotlib.pyplot.legend",
"utils.options.args_parser",
"torch.manual_seed",
"models.Nets.MLP",
"matplotlib.pyplot.figure",
"torchvision.datasets.MNIST",
"utils.sampling.mnist_iid",
"models.Update.LocalUpdate",
"models.test.test_img"
]
| [((87, 108), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (101, 108), False, 'import matplotlib\n'), ((876, 889), 'utils.options.args_parser', 'args_parser', ([], {}), '()\n', (887, 889), False, 'from utils.options import args_parser\n'), ((953, 973), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (970, 973), False, 'import torch\n'), ((1186, 1264), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""gloo"""', 'rank': 'rank', 'world_size': 'args.world_size'}), "(backend='gloo', rank=rank, world_size=args.world_size)\n", (1209, 1264), True, 'import torch.distributed as dist\n'), ((10241, 10253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10251, 10253), True, 'import matplotlib.pyplot as plt\n'), ((10451, 10475), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""train_loss"""'], {}), "('train_loss')\n", (10461, 10475), True, 'import matplotlib.pyplot as plt\n'), ((10480, 10500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (10490, 10500), True, 'import matplotlib.pyplot as plt\n'), ((10505, 10527), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (10515, 10527), True, 'import matplotlib.pyplot as plt\n'), ((10714, 10753), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_train', 'args'], {}), '(net_glob, dataset_train, args)\n', (10722, 10753), False, 'from models.test import test_img\n'), ((10780, 10818), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_test', 'args'], {}), '(net_glob, dataset_test, args)\n', (10788, 10818), False, 'from models.test import test_img\n'), ((10958, 10970), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10968, 10970), True, 'import matplotlib.pyplot as plt\n'), ((11190, 11212), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""test_acc"""'], {}), "('test_acc')\n", (11200, 11212), True, 'import matplotlib.pyplot as plt\n'), ((11217, 11237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (11227, 11237), True, 'import matplotlib.pyplot as plt\n'), ((11242, 11264), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (11252, 11264), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1821), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=True, download=True, transform=\n trans_mnist)\n", (1748, 1821), False, 'from torchvision import datasets, transforms\n'), ((1840, 1928), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=False, download=True, transform=\n trans_mnist)\n", (1854, 1928), False, 'from torchvision import datasets, transforms\n'), ((6473, 6489), 'models.Fed.FedAvg', 'FedAvg', (['w_locals'], {}), '(w_locals)\n', (6479, 6489), False, 'from models.Fed import FedAvg\n'), ((7284, 7322), 'models.test.test_img', 'test_img', (['net_glob', 'dataset_test', 'args'], {}), '(net_glob, dataset_test, args)\n', (7292, 7322), False, 'from models.test import test_img\n'), ((9819, 9836), 'models.Fed.FedAvg', 'FedAvg', (['w_localsF'], {}), '(w_localsF)\n', (9825, 9836), False, 'from models.Fed import FedAvg\n'), ((10137, 10176), 'models.test.test_img', 'test_img', (['net_globF', 'dataset_test', 'args'], {}), '(net_globF, dataset_test, args)\n', (10145, 
10176), False, 'from models.test import test_img\n'), ((2012, 2052), 'utils.sampling.mnist_iid', 'mnist_iid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2021, 2052), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2092, 2135), 'utils.sampling.mnist_noniid', 'mnist_noniid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2104, 2135), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2316, 2404), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=True, download=True, transform=\n trans_cifar)\n", (2332, 2404), False, 'from torchvision import datasets, transforms\n'), ((2423, 2512), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=False, download=True, transform=\n trans_cifar)\n", (2439, 2512), False, 'from torchvision import datasets, transforms\n'), ((5401, 5468), 'models.Update.LocalUpdate', 'LocalUpdate', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (5412, 5468), False, 'from models.Update import LocalUpdate\n'), ((7439, 7506), 'models.Update.LocalUpdate', 'LocalUpdate', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (7450, 7506), False, 'from models.Update import LocalUpdate\n'), ((7519, 7543), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7541, 7543), False, 'import torch\n'), ((9415, 9483), 'models.Update.LocalUpdateF', 'LocalUpdateF', ([], {'args': 'args', 'dataset': 'dataset_train', 'idxs': 'dict_users[idx]'}), '(args=args, dataset=dataset_train, idxs=dict_users[idx])\n', (9427, 9483), False, 'from models.Update import LocalUpdateF\n'), ((1523, 1544), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1542, 1544), False, 'from torchvision import datasets, transforms\n'), ((1546, 1588), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (1566, 1588), False, 'from torchvision import datasets, transforms\n'), ((2554, 2594), 'utils.sampling.cifar_iid', 'cifar_iid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (2563, 2594), False, 'from utils.sampling import mnist_iid, mnist_noniid, cifar_iid\n'), ((2924, 2943), 'models.Nets.CNNCifar', 'CNNCifar', ([], {'args': 'args'}), '(args=args)\n', (2932, 2943), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5897, 5913), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (5910, 5913), False, 'import copy\n'), ((6032, 6051), 'copy.deepcopy', 'copy.deepcopy', (['loss'], {}), '(loss)\n', (6045, 6051), False, 'import copy\n'), ((8284, 8303), 'models.Nets.CNNCifar', 'CNNCifar', ([], {'args': 'args'}), '(args=args)\n', (8292, 8303), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((9631, 9647), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (9644, 9647), False, 'import copy\n'), ((9748, 9767), 'copy.deepcopy', 'copy.deepcopy', (['loss'], {}), '(loss)\n', (9761, 9767), False, 'import copy\n'), ((2212, 2233), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2231, 2233), 
False, 'from torchvision import datasets, transforms\n'), ((2235, 2289), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (2255, 2289), False, 'from torchvision import datasets, transforms\n'), ((3037, 3056), 'models.Nets.CNNMnist', 'CNNMnist', ([], {'args': 'args'}), '(args=args)\n', (3045, 3056), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((4140, 4200), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (4143, 4200), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5964, 5980), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (5977, 5980), False, 'import copy\n'), ((8398, 8417), 'models.Nets.CNNMnist', 'CNNMnist', ([], {'args': 'args'}), '(args=args)\n', (8406, 8417), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((9699, 9715), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (9712, 9715), False, 'import copy\n'), ((3222, 3282), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (3225, 3282), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((3338, 3398), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (3341, 3398), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((8554, 8614), 'models.Nets.MLP', 'MLP', ([], {'dim_in': 'len_in', 'dim_hidden': '(200)', 'dim_out': 'args.num_classes'}), '(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes)\n', (8557, 8614), False, 'from models.Nets import MLP, CNNMnist, CNNCifar\n'), ((5551, 5574), 'copy.deepcopy', 'copy.deepcopy', (['net_glob'], {}), '(net_glob)\n', (5564, 5574), False, 'import copy\n'), ((9523, 9547), 'copy.deepcopy', 'copy.deepcopy', (['net_globF'], {}), '(net_globF)\n', (9536, 9547), False, 'import copy\n')] |
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
class FunctionWithUnitTests(Function):
"""
Function that lets us run unit tests inside lambda function. We want to run unit
tests both locally and remotely.
"""
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
code=Code.from_asset(root),
handler='handler.handler',
runtime=Runtime.PYTHON_3_8,
timeout=Duration.minutes(5),
memory_size=512,
layers=[
Layer(
scope=scope,
name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
dependencies={
# These dependencies are required for running unit tests inside lambda functions.
# Pytest is used for running actual unit tests.
'pytest': PackageVersion.from_string_version('6.2.5'),
# Pook is used for HTTP mocking, therefore it is also needed here.
'pook': PackageVersion.from_string_version('1.0.1'),
# Not sure about this dependency. Lambda runtime throws errors if its missing.
'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
# This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
# For some reason it doesn't.
# Tests would fail with import error otherwise.
'importlib-resources': PackageVersion.from_string_version('5.4.0')
}
)
]
)
| [
"b_aws_testing_framework.tools.cdk_testing.testing_stack.TestingStack.global_prefix",
"aws_cdk.aws_lambda.Code.from_asset",
"aws_cdk.core.Duration.minutes",
"b_cfn_lambda_layer.package_version.PackageVersion.from_string_version"
]
| [((691, 712), 'aws_cdk.aws_lambda.Code.from_asset', 'Code.from_asset', (['root'], {}), '(root)\n', (706, 712), False, 'from aws_cdk.aws_lambda import Function, Code, Runtime\n'), ((813, 832), 'aws_cdk.core.Duration.minutes', 'Duration.minutes', (['(5)'], {}), '(5)\n', (829, 832), False, 'from aws_cdk.core import Stack, Duration\n'), ((621, 649), 'b_aws_testing_framework.tools.cdk_testing.testing_stack.TestingStack.global_prefix', 'TestingStack.global_prefix', ([], {}), '()\n', (647, 649), False, 'from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack\n'), ((1272, 1315), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""6.2.5"""'], {}), "('6.2.5')\n", (1306, 1315), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1440, 1483), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""1.0.1"""'], {}), "('1.0.1')\n", (1474, 1483), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1628, 1672), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""1.99.0"""'], {}), "('1.99.0')\n", (1662, 1672), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((1977, 2020), 'b_cfn_lambda_layer.package_version.PackageVersion.from_string_version', 'PackageVersion.from_string_version', (['"""5.4.0"""'], {}), "('5.4.0')\n", (2011, 2020), False, 'from b_cfn_lambda_layer.package_version import PackageVersion\n'), ((968, 996), 'b_aws_testing_framework.tools.cdk_testing.testing_stack.TestingStack.global_prefix', 'TestingStack.global_prefix', ([], {}), '()\n', (994, 996), False, 'from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack\n')] |
# <NAME> (<EMAIL>)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
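    """Binomial-test each observed failure count against the expected failure rate and assert the p-values clear the (per-test corrected) FPR threshold."""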
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
    # Note that we are not doing multiple comparison correction between the
# two sided and one sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
        # Could also test distn with 1-sided KS test but this is easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
        # Could also test distn with 1-sided KS test but this is easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
        # Could also test distn with 1-sided KS test but this is easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
| [
"numpy.random.rand",
"mlpaper.util.area",
"numpy.array",
"builtins.range",
"numpy.mean",
"mlpaper.classification.curve_boot",
"scipy.stats.binom_test",
"mlpaper.util.interp1d",
"numpy.dot",
"numpy.linspace",
"numpy.random.seed",
"numpy.min",
"mlpaper.mlpaper.boot_EB",
"numpy.abs",
"numpy.random.multivariate_normal",
"numpy.random.choice",
"mlpaper.mlpaper._boot_EB_and_test",
"numpy.random.randn",
"numpy.zeros",
"numpy.all"
]
| [((3095, 3129), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3103, 3129), True, 'import numpy as np\n'), ((3150, 3184), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3158, 3184), True, 'import numpy as np\n'), ((3206, 3240), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3214, 3240), True, 'import numpy as np\n'), ((3255, 3266), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (3260, 3266), False, 'from builtins import range\n'), ((5793, 5804), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (5798, 5804), False, 'from builtins import range\n'), ((6271, 6288), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6286, 6288), True, 'import numpy as np\n'), ((6831, 6842), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (6836, 6842), False, 'from builtins import range\n'), ((7178, 7199), 'numpy.random.seed', 'np.random.seed', (['(56467)'], {}), '(56467)\n', (7192, 7199), True, 'import numpy as np\n'), ((500, 538), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {}), '(ff, runs, expect_p_fail)\n', (513, 538), True, 'import scipy.stats as ss\n'), ((574, 635), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {'alternative': '"""greater"""'}), "(ff, runs, expect_p_fail, alternative='greater')\n", (587, 635), True, 'import scipy.stats as ss\n'), ((815, 834), 'numpy.min', 'np.min', (['pvals_2side'], {}), '(pvals_2side)\n', (821, 834), True, 'import numpy as np\n'), ((895, 914), 'numpy.min', 'np.min', (['pvals_1side'], {}), '(pvals_1side)\n', (901, 914), True, 'import numpy as np\n'), ((1309, 1321), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1313, 1321), False, 'from mlpaper.util import area, interp1d\n'), ((1395, 1407), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1399, 1407), False, 'from mlpaper.util import area, interp1d\n'), ((1431, 1448), 'mlpaper.util.area', 'area', (['*true_curve'], {}), '(*true_curve)\n', (1435, 1448), False, 'from mlpaper.util import area, interp1d\n'), ((1458, 1478), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1472, 1478), True, 'import numpy as np\n'), ((1513, 1616), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'true_value', 'curve_f': 'curve_f', 'confidence': 'confidence', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=true_value, curve_f=curve_f, confidence=\n confidence, x_grid=x_grid)\n', (1523, 1616), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((1661, 1706), 'mlpaper.util.interp1d', 'interp1d', (['curve[cc.XGRID].values', '*true_curve'], {}), '(curve[cc.XGRID].values, *true_curve)\n', (1669, 1706), False, 'from mlpaper.util import area, interp1d\n'), ((2130, 2150), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2144, 2150), True, 'import numpy as np\n'), ((2187, 2310), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(False)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=False, x_grid=x_grid)\n', (2197, 2310), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2514, 2551), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', 
(2520, 2551), True, 'import numpy as np\n'), ((2561, 2581), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2575, 2581), True, 'import numpy as np\n'), ((2618, 2740), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(True)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=True, x_grid=x_grid)\n', (2628, 2740), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2952, 2989), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', (2958, 2989), True, 'import numpy as np\n'), ((3281, 3299), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (3296, 3299), True, 'import numpy as np\n'), ((3312, 3333), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (3327, 3333), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.dot', 'np.dot', (['S', 'S.T'], {}), '(S, S.T)\n', (3352, 3360), True, 'import numpy as np\n'), ((3474, 3511), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (3485, 3511), True, 'import numpy as np\n'), ((3651, 3695), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'S'], {'size': 'N'}), '(mu, S, size=N)\n', (3680, 3695), True, 'import numpy as np\n'), ((4119, 4157), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'DEFAULT_NGRID'], {}), '(0.05, 0.95, DEFAULT_NGRID)\n', (4130, 4157), True, 'import numpy as np\n'), ((4886, 4923), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (4897, 4923), True, 'import numpy as np\n'), ((5819, 5836), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5834, 5836), True, 'import numpy as np\n'), ((5933, 5943), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (5940, 5943), True, 'import numpy as np\n'), ((5957, 5987), 'mlpaper.mlpaper.boot_EB', 'bt.boot_EB', (['x'], {'confidence': '(0.95)'}), '(x, confidence=0.95)\n', (5967, 5987), True, 'import mlpaper.mlpaper as bt\n'), ((6308, 6325), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6323, 6325), True, 'import numpy as np\n'), ((6416, 6478), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['x'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x, confidence=confidence, return_CI=True)\n', (6436, 6478), True, 'import mlpaper.mlpaper as bt\n'), ((6579, 6654), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['(x - true_value)'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x - true_value, confidence=confidence, return_CI=True)\n', (6599, 6654), True, 'import mlpaper.mlpaper as bt\n'), ((1752, 1776), 'numpy.abs', 'np.abs', (['(auc - true_value)'], {}), '(auc - true_value)\n', (1758, 1776), True, 'import numpy as np\n'), ((2080, 2120), 'numpy.all', 'np.all', (['(curve[cc.XGRID].values == x_grid)'], {}), '(curve[cc.XGRID].values == x_grid)\n', (2086, 2120), True, 'import numpy as np\n'), ((2804, 2825), 'numpy.abs', 'np.abs', (['(auc - auc_ref)'], {}), '(auc - auc_ref)\n', (2810, 2825), True, 'import numpy as np\n'), ((3534, 3556), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3542, 3556), True, 'import numpy as np\n'), ((3558, 3580), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3566, 3580), True, 'import numpy as np\n'), ((3610, 
3627), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (3624, 3627), True, 'import numpy as np\n'), ((3707, 3724), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3722, 3724), True, 'import numpy as np\n'), ((3777, 3818), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'N', 'replace': '(True)'}), '(N, size=N, replace=True)\n', (3793, 3818), True, 'import numpy as np\n'), ((4492, 4514), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4500, 4514), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.array', 'np.array', (['[[p, p]]'], {}), '([[p, p]])\n', (4524, 4534), True, 'import numpy as np\n'), ((4946, 4968), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4954, 4968), True, 'import numpy as np\n'), ((4970, 4992), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (4978, 4992), True, 'import numpy as np\n'), ((5856, 5873), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5871, 5873), True, 'import numpy as np\n'), ((6005, 6024), 'numpy.abs', 'np.abs', (['(mu - mu_est)'], {}), '(mu - mu_est)\n', (6011, 6024), True, 'import numpy as np\n'), ((3926, 3937), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3934, 3937), True, 'import numpy as np\n'), ((3989, 4000), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3997, 4000), True, 'import numpy as np\n'), ((5896, 5914), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (5911, 5914), True, 'import numpy as np\n'), ((6869, 6887), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (6884, 6887), True, 'import numpy as np\n')] |
from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
| [
"pandas.Series",
"core.timeseries_study.TimeSeriesDailyStudy",
"core.timeseries_study.map_dict_to_dataframe",
"core.artificial_signal_generators.ArmaProcess",
"numpy.array",
"helpers.unit_test.convert_df_to_string",
"pandas.date_range",
"core.timeseries_study.TimeSeriesMinutelyStudy"
]
| [((323, 364), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {}), "('2018-12-31', '2019-01-31')\n", (336, 364), True, 'import pandas as pd\n'), ((419, 445), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (428, 445), True, 'import pandas as pd\n'), ((461, 489), 'core.timeseries_study.TimeSeriesDailyStudy', 'tss.TimeSeriesDailyStudy', (['ts'], {}), '(ts)\n', (485, 489), True, 'import core.timeseries_study as tss\n'), ((617, 669), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {'freq': '"""5T"""'}), "('2018-12-31', '2019-01-31', freq='5T')\n", (630, 669), True, 'import pandas as pd\n'), ((724, 750), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (733, 750), True, 'import pandas as pd\n'), ((766, 820), 'core.timeseries_study.TimeSeriesMinutelyStudy', 'tss.TimeSeriesMinutelyStudy', (['ts'], {'freq_name': '"""5 minutes"""'}), "(ts, freq_name='5 minutes')\n", (793, 820), True, 'import core.timeseries_study as tss\n'), ((1159, 1225), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs'}), '(dict_=result_dict, functions=stat_funcs)\n', (1184, 1225), True, 'import core.timeseries_study as tss\n'), ((1272, 1316), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1296, 1316), True, 'import helpers.unit_test as hut\n'), ((1623, 1711), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'add_prefix': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n add_prefix=False)\n', (1648, 1711), True, 'import core.timeseries_study as tss\n'), ((1779, 1823), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1803, 1823), True, 'import helpers.unit_test as hut\n'), ((2130, 2220), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'progress_bar': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n progress_bar=False)\n', (2155, 2220), True, 'import core.timeseries_study as tss\n'), ((2288, 2332), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (2312, 2332), True, 'import helpers.unit_test as hut\n'), ((2457, 2480), 'numpy.array', 'np.array', (['[0.75, -0.25]'], {}), '([0.75, -0.25])\n', (2465, 2480), True, 'import numpy as np\n'), ((2500, 2522), 'numpy.array', 'np.array', (['[0.65, 0.35]'], {}), '([0.65, 0.35])\n', (2508, 2522), True, 'import numpy as np\n'), ((2546, 2585), 'core.artificial_signal_generators.ArmaProcess', 'sig_gen.ArmaProcess', (['arparams', 'maparams'], {}), '(arparams, maparams)\n', (2565, 2585), True, 'import core.artificial_signal_generators as sig_gen\n')] |
# VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
| [
"random.random",
"statsmodels.tsa.vector_ar.var_model.VAR"
]
| [((268, 277), 'statsmodels.tsa.vector_ar.var_model.VAR', 'VAR', (['data'], {}), '(data)\n', (271, 277), False, 'from statsmodels.tsa.vector_ar.var_model import VAR\n'), ((176, 184), 'random.random', 'random', ([], {}), '()\n', (182, 184), False, 'from random import random\n'), ((199, 207), 'random.random', 'random', ([], {}), '()\n', (205, 207), False, 'from random import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
def numericLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)')
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if num_match.search(str(item[0])):
instr[index] = (instr[index][1], instr[index][1])
else:
if num_match.search(str(item)):
instr[index] = (instr[index], instr[index][1])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
numericLemmatizer(instr)
else:
print("not supported")
return(instr)
def defaultLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) != tup):
instr[index] = (instr[index], instr[index])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
defaultLemmatizer(instr)
else:
print("not supported")
return(instr)
def lookupLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
lemmatalist = lt.drawlist()
words = []
lemma = []
for item in lemmatalist:
words.append(item.split("\t")[0])
lemma.append(item.split("\t")[1])
tokens = set(words)
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index][1],tag)
else:
if(type(item) != tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index], tag)
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
lookupLemmatizer(instr)
else:
print("not supported")
return(instr)
def Lemmatizer(instr):
instr = lookupLemmatizer(instr)
instr = numericLemmatizer(instr)
instr = defaultLemmatizer(instr)
return(instr)
if __name__ == '__main__':
input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।'
print(lookupLemmatizer(input_str))
print(numericLemmatizer(input_str))
print(defaultLemmatizer(input_str))
print(Lemmatizer(input_str))
| [
"sangita.hindi.corpora.lemmata.drawlist",
"sangita.hindi.tokenizer.tokenize",
"re.compile"
]
| [((337, 446), 're.compile', 're.compile', (['"""([०१२३४५६७८९]+[\\\\.\\\\,]*)+[०१२३४५६७८९]+|([-+]*\\\\d+[\\\\.\\\\,]*)+\\\\d+|([०१२३४५६७८९]+|\\\\d+)"""'], {}), "(\n '([०१२३४५६७८९]+[\\\\.\\\\,]*)+[०१२३४५६७८९]+|([-+]*\\\\d+[\\\\.\\\\,]*)+\\\\d+|([०१२३४५६७८९]+|\\\\d+)'\n )\n", (347, 446), False, 'import re\n'), ((1676, 1689), 'sangita.hindi.corpora.lemmata.drawlist', 'lt.drawlist', ([], {}), '()\n', (1687, 1689), True, 'import sangita.hindi.corpora.lemmata as lt\n'), ((901, 920), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (913, 920), True, 'import sangita.hindi.tokenizer as tok\n'), ((1397, 1416), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (1409, 1416), True, 'import sangita.hindi.tokenizer as tok\n'), ((2414, 2433), 'sangita.hindi.tokenizer.tokenize', 'tok.tokenize', (['instr'], {}), '(instr)\n', (2426, 2433), True, 'import sangita.hindi.tokenizer as tok\n')] |
from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class MapItemTestCase(WebTest):
def setUp(self):
self.embed_code = '''
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670"
width="600"
height="450"
frameborder="0"
style="border:0"
allowfullscreen
></iframe>
'''
self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>'
self.layout_1 = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout_1.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout_1.save()
self.staff_1 = User.objects.create(
email='<EMAIL>',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page_1 = LayoutPage()
self.page_1.title = 'Test Page'
self.page_1.slug = 'test-page'
self.page_1.parent_site = Site.objects.first()
self.page_1.layout = self.layout_1
self.page_1.author = self.staff_1
self.page_1.status = LayoutPage.PUBLISHED
self.page_1.save()
self.map_1 = fluent_contents.create_content_instance(
models.MapItem,
self.page_1,
_embed_code=self.embed_code,
)
self.map_item = models.MapItem(
parent_type=ContentType.objects.get_for_model(type(self.page_1)),
parent_id=self.page_1.id,
placeholder=self.page_1.get_placeholder_by_slot('main')[0],
_embed_code=self.embed_code,
)
self.page_1.publish()
def test_map_renders(self):
response = self.app.get(self.page_1.get_published().get_absolute_url())
response.mustcontain(self.cleaned_embed_code)
def test_cleaned_embed_code(self):
self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
| [
"django.contrib.auth.get_user_model",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"django_dynamic_fixture.G",
"icekit.page_types.layout_page.models.LayoutPage",
"django.contrib.sites.models.Site.objects.first",
"icekit.utils.fluent_contents.create_content_instance"
]
| [((447, 463), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (461, 463), False, 'from django.contrib.auth import get_user_model\n'), ((1461, 1515), 'django_dynamic_fixture.G', 'G', (['Layout'], {'template_name': '"""icekit/layouts/default.html"""'}), "(Layout, template_name='icekit/layouts/default.html')\n", (1462, 1515), False, 'from django_dynamic_fixture import G\n'), ((1871, 1883), 'icekit.page_types.layout_page.models.LayoutPage', 'LayoutPage', ([], {}), '()\n', (1881, 1883), False, 'from icekit.page_types.layout_page.models import LayoutPage\n'), ((1997, 2017), 'django.contrib.sites.models.Site.objects.first', 'Site.objects.first', ([], {}), '()\n', (2015, 2017), False, 'from django.contrib.sites.models import Site\n'), ((2202, 2303), 'icekit.utils.fluent_contents.create_content_instance', 'fluent_contents.create_content_instance', (['models.MapItem', 'self.page_1'], {'_embed_code': 'self.embed_code'}), '(models.MapItem, self.page_1,\n _embed_code=self.embed_code)\n', (2241, 2303), False, 'from icekit.utils import fluent_contents\n'), ((1604, 1649), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['LayoutPage'], {}), '(LayoutPage)\n', (1637, 1649), False, 'from django.contrib.contenttypes.models import ContentType\n')] |
"""
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def process_video(filename):
cap = cv2.VideoCapture(filename)
tasks = list()
frame_ind = 0
while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stop once the stream is exhausted instead of queueing empty frames
            break
        tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind)))
        frame_ind += 1
        await asyncio.sleep(0)  # yield control so already-scheduled frame tasks can start
    cap.release()
    await asyncio.gather(*tasks)  # gather expects the awaitables unpacked, not the list itself
async def process_frame(frame, frame_ind):
logger.info("Processing frame {}".format(frame_ind))
await asyncio.sleep(20.0)
logger.info("Finished processing frame {}".format(frame_ind))
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(process_video(sys.argv[1]))
logger.info("Completed")
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"logging.getLogger",
"asyncio.sleep",
"cv2.VideoCapture",
"asyncio.gather",
"asyncio.get_event_loop"
]
| [((92, 160), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(thread)-5d]%(asctime)s: %(message)s"""'}), "(format='[%(thread)-5d]%(asctime)s: %(message)s')\n", (111, 160), False, 'import logging\n'), ((170, 196), 'logging.getLogger', 'logging.getLogger', (['"""async"""'], {}), "('async')\n", (187, 196), False, 'import logging\n'), ((274, 300), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (290, 300), False, 'import cv2\n'), ((782, 806), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (804, 806), False, 'import asyncio\n'), ((537, 558), 'asyncio.gather', 'asyncio.gather', (['tasks'], {}), '(tasks)\n', (551, 558), False, 'import asyncio\n'), ((671, 690), 'asyncio.sleep', 'asyncio.sleep', (['(20.0)'], {}), '(20.0)\n', (684, 690), False, 'import asyncio\n'), ((510, 526), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (523, 526), False, 'import asyncio\n')] |
#!/usr/bin/python3
"""
Read "lspci -v" and "glxinfo" outputs
"""
import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError
@dataclass
class VideoCard:
type = "graphics-card"
manufacturer_brand = ""
reseller_brand = ""
internal_name = ""
model = ""
capacity = -1 # bytes
warning = ""
def parse_lspci_output(gpu: VideoCard, lspci_path: str, interactive: bool = False):
try:
with open(lspci_path, "r") as f:
lspci_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(lspci_path)
lspci_sections = lspci_output.split("\n\n")
for section in lspci_sections:
if "VGA compatible controller" in section:
first_line = section.splitlines()[0].split(": ", 1)[
1
] # removes "VGA compatible controller:"
second_line = section.splitlines()[1]
part_between_square_brackets = None
try:
# take the first string between [] from the first line
part_between_square_brackets = first_line.split("[")[1].split("]")[0]
except IndexError:
# there may not be an argument in between []
pass
if "Subsystem:" in second_line:
# The model or model family is often repeated here, but removing it automatically is complicated
gpu.reseller_brand = (
second_line.split("Subsystem: ")[1].split("[", 1)[0].strip()
)
gpu.reseller_brand = gpu.reseller_brand.replace(
"Integrated Graphics Controller", ""
)
# -----------------------------------------------------------------
# AMD/ATI
# -----------------------------------------------------------------
if part_between_square_brackets is not None and (
"AMD" in part_between_square_brackets
or "ATI" in part_between_square_brackets
):
gpu.manufacturer_brand = part_between_square_brackets
# take second string between []
gpu.model = first_line.split("[")[2].split("]")[0]
if "controller" in gpu.model:
gpu.model = section.splitlines()[1].split(" ")[-1]
# -----------------------------------------------------------------
# Nvidia
# -----------------------------------------------------------------
elif "NVIDIA" in first_line.upper():
gpu.manufacturer_brand = "Nvidia"
gpu.model = part_between_square_brackets
if gpu.reseller_brand != "":
pieces = gpu.reseller_brand.rsplit(" ", 1)
gpu.reseller_brand = pieces[0]
gpu.internal_name = pieces[1]
# -----------------------------------------------------------------
# Intel
# -----------------------------------------------------------------
elif "INTEL" in first_line.upper():
gpu.manufacturer_brand = "Intel"
if "Integrated Graphics" in first_line:
tmp_model = first_line.split("Intel Corporation ")[1].split(
" Integrated Graphics"
)[0]
# if there are no numbers, e.g. "Core Processor", tmp_model is not a model number
if not re.search("\\d+", tmp_model):
tmp_model = ""
elif "HD Graphics" in first_line:
tmp_model = (
first_line.split("Intel Corporation ")[1]
.split("(", 1)[0]
.strip()
)
elif "[" in first_line and "]" in first_line:
tmp_model = first_line.split("[")[1].split("]")[0]
else:
tmp_model = ""
if tmp_model != "":
gpu.model = tmp_model
else:
gpu.model = ""
# -----------------------------------------------------------------
# VIA
# -----------------------------------------------------------------
elif first_line.startswith("VIA"):
gpu.manufacturer_brand = "VIA"
gpu.model = part_between_square_brackets
tmp_model = first_line.split("[")[0]
i = 0
for i, char in enumerate("VIA Technologies, Inc. "):
if tmp_model[i] != char:
break
gpu.internal_name = tmp_model[i:].strip()
# -----------------------------------------------------------------
# SiS
# -----------------------------------------------------------------
elif part_between_square_brackets == "SiS":
# May be written somewhere else on other models, but we have so few SiS cards that it's difficult to
# find more examples. Also, they haven't made any video card in the last 15 years or so.
gpu.manufacturer_brand = part_between_square_brackets
if gpu.reseller_brand.lower() == "silicon integrated systems":
gpu.reseller_brand = "SiS"
gpu.model = first_line.split("]", 1)[1]
# These may be useful for non-integrated cards, however the example ones are all integrated
if " PCIE" in gpu.model:
gpu.model = gpu.model.split(" PCIE", 1)[0].strip()
elif " PCI/AGP" in gpu.model:
gpu.model = gpu.model.split(" PCI/AGP", 1)[0].strip()
if gpu.model in gpu.reseller_brand:
gpu.reseller_brand = gpu.reseller_brand.split(gpu.model, 1)[
0
].strip()
else:
gpu.manufacturer_brand = None
error = (
"I couldn't find the Video Card brand. The model was set to 'None' and is to be edited "
"logging into the TARALLO afterwards. The information you're looking for should be in the "
f"following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
if gpu.model is None:
error = (
"I couldn't find the Integrated Graphics model. The model was set to 'None' and is to be "
"edited logging into the TARALLO afterwards. The information you're looking for should be in "
f"the following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
else:
# Try to remove duplicate information
gpu.reseller_brand = gpu.reseller_brand.replace(gpu.model, "").strip()
if gpu.internal_name is not None:
# Same
gpu.reseller_brand = gpu.reseller_brand.replace(
gpu.internal_name, ""
).strip()
break
def parse_glxinfo_output(gpu: VideoCard, glxinfo_path: str):
try:
with open(glxinfo_path, "r") as f:
glxinfo_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(glxinfo_path)
for i, line in enumerate(glxinfo_output.splitlines()):
# this line comes before the "Dedicated video memory" line
# this basically saves a default value if the dedicated memory line cannot be found
if "Video memory" in line:
try:
tmp_vid_mem = int(line.split(" ")[6].split(" ")[0][:-2])
tmp_vid_mem_multiplier = line[-2:]
except ValueError:
exit(-1)
return # To stop complaints from PyCharm
gpu.capacity = convert_video_memory_size(
tmp_vid_mem, tmp_vid_mem_multiplier
)
if "Dedicated video memory" in line:
try:
tmp_vram = int(line.split(" ")[7].split(" ")[0])
tmp_vram_multiplier = line[-2:]
except ValueError:
exit(-1)
return
capacity = convert_video_memory_size(tmp_vram, tmp_vram_multiplier)
if capacity < 0:
gpu.warning = "Could not find dedicated video memory"
if gpu.capacity < 0:
gpu.warning += ". The value cannot be trusted."
else:
gpu.capacity = capacity
break
if gpu.capacity > 0:
# Round to the next power of 2
# this may be different from human readable capacity...
rounded = 2 ** (gpu.capacity - 1).bit_length()
one_and_half = int(rounded / 2 * 1.5)
# Accounts for 3 GB VRAM cards and similar
# Yes they do exist, try to remove this part and watch tests fail (and the card was manually verified to be 3 GB)
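        # Worked example: a reported 3 GiB rounds up to 4 GiB, one_and_half is 3 GiB,
        # and since one_and_half >= capacity the original 3 GiB value is kept.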
if one_and_half >= gpu.capacity:
gpu.capacity = one_and_half
else:
gpu.capacity = rounded
def convert_video_memory_size(capacity, units_of_measure):
if units_of_measure == "GB":
capacity *= 1024 * 1024 * 1024
elif units_of_measure == "MB":
capacity *= 1024 * 1024
elif units_of_measure.upper() == "KB":
capacity *= 1024
else:
capacity = -1
return capacity
def read_lspci_and_glxinfo(
has_dedicated: bool, lspci_path: str, glxinfo_path: str, interactive: bool = False
):
gpu = VideoCard()
if has_dedicated:
parse_lspci_output(gpu, lspci_path, interactive)
parse_glxinfo_output(gpu, glxinfo_path)
else: # integrated_in_mobo or integrated_in_cpu
parse_lspci_output(gpu, lspci_path, interactive)
# don't parse glxinfo because the VRAM is part of the RAM and varies
gpu.capacity = None
# print("The VRAM capacity could not be detected. "
# "Please try looking for it on the Video Card or on the Internet. "
# "The capacity value defaulted to 'None'. "
# "For an integrated GPU, the VRAM may also be shared with the system RAM, so an empty value is acceptable.")
result = {
"type": "graphics-card",
"brand": gpu.reseller_brand.strip(),
"model": gpu.model.strip(),
"internal-name": gpu.internal_name.strip(),
"capacity-byte": gpu.capacity,
"working": "yes", # Indeed it is working
}
if gpu.manufacturer_brand is not None and gpu.reseller_brand is not None:
if gpu.manufacturer_brand.lower() != gpu.reseller_brand.lower():
result["brand-manufacturer"] = gpu.manufacturer_brand
return result
if __name__ == "__main__":
import argparse
import json
parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
parser.add_argument(
"-d",
"--dedicated",
action="store_true",
default=False,
help="computer has dedicated GPU",
)
args = parser.parse_args()
try:
print(
json.dumps(
read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
indent=2,
)
)
except InputFileNotFoundError as e:
print(str(e))
exit(1)
| [
"InputFileNotFoundError.InputFileNotFoundError",
"argparse.ArgumentParser",
"re.search"
]
| [((11116, 11181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse lspci/glxinfo output"""'}), "(description='Parse lspci/glxinfo output')\n", (11139, 11181), False, 'import argparse\n'), ((577, 611), 'InputFileNotFoundError.InputFileNotFoundError', 'InputFileNotFoundError', (['lspci_path'], {}), '(lspci_path)\n', (599, 611), False, 'from InputFileNotFoundError import InputFileNotFoundError\n'), ((7596, 7632), 'InputFileNotFoundError.InputFileNotFoundError', 'InputFileNotFoundError', (['glxinfo_path'], {}), '(glxinfo_path)\n', (7618, 7632), False, 'from InputFileNotFoundError import InputFileNotFoundError\n'), ((3542, 3570), 're.search', 're.search', (['"""\\\\d+"""', 'tmp_model'], {}), "('\\\\d+', tmp_model)\n", (3551, 3570), False, 'import re\n')] |
#!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run(task):
# Initialization
task.check_conf(["entities"])
conf = task.conf
log = task.logger()
task.check_in_ports(["oncodrive_ids"])
task.check_out_ports(["combinations"])
oncodrive_port = task.ports["oncodrive_ids"]
combination_port = task.ports["combinations"]
es = EntityServer(conf["entities"])
em = es.manager()
log.info("Indexing available combination results ...")
comb_results_index = em.group_ids(
["icdo_topography", "icdo_morphology", "id_type"],
types.MRNA_COMBINATION, unique = True)
ENSEMBL_GENE = "ensembl:gene"
classif = {}
log.info("Classifying oncodrive results ...")
for oid in oncodrive_port:
o = em.find(oid, types.MRNA_ONCODRIVE_GENES)
if o is None:
log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid))
continue
okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"])
key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE)
log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key)))
if key in classif:
classif[key] += [o]
else:
classif[key] = [o]
log.info("Preparing combinations ...")
for key in sorted(classif):
if key in comb_results_index:
cid = comb_results_index[key][0]
c = em.find(cid, types.MRNA_COMBINATION)
if c is None:
log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid))
return
else:
c = DataElement(key_sep = "/")
c["id"] = cid = str(uuid.uuid4())
c["icdo_topography"] = key[0]
c["icdo_morphology"] = key[1]
c["id_type"] = ENSEMBL_GENE
olist = classif[key]
log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist)))
ids = c.create_list()
flist = c.create_list()
for o in olist:
ids += [o["id"]]
flist += [o["results_file"]]
c["source"] = src = c.create_element()
src["type"] = types.MRNA_ONCODRIVE_GENES
src["ids"] = ids
c["files"] = flist
combination_port.write(json.dumps(c.to_native()))
em.close()
if __name__ == "__main__":
Task(run).start()
| [
"wok.element.DataElement",
"uuid.uuid4",
"wok.task.Task",
"intogen.data.entity.server.EntityServer"
]
| [((849, 879), 'intogen.data.entity.server.EntityServer', 'EntityServer', (["conf['entities']"], {}), "(conf['entities'])\n", (861, 879), False, 'from intogen.data.entity.server import EntityServer\n'), ((2006, 2030), 'wok.element.DataElement', 'DataElement', ([], {'key_sep': '"""/"""'}), "(key_sep='/')\n", (2017, 2030), False, 'from wok.element import DataElement\n'), ((2619, 2628), 'wok.task.Task', 'Task', (['run'], {}), '(run)\n', (2623, 2628), False, 'from wok.task import Task\n'), ((2056, 2068), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2066, 2068), False, 'import uuid\n')] |
import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower')  # with an even number of elements, the 0.5 quantile using interpolation='midpoint' equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower')  # with an odd number of elements, the 0.5 quantile using interpolation='lower' equals the median
    print("data 0.5 quantile = ",res)
    print("data1 0.5 quantile = ",res1)
    res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
    res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
    print("data 0.25 quantile = ",res)
    print("data1 0.25 quantile = ",res1)
    res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
    res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
    print("data 0.75 quantile = ",res)
    print("data1 0.75 quantile = ",res1)
    res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
    res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
    print("data 1.0 quantile = ",res)
    print("data1 1.0 quantile = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
| [
"GeneralStats.GeneralStats",
"numpy.array",
"pandas.Series",
"scipy.stats.skew"
]
| [((178, 195), 'GeneralStats.GeneralStats', 'gs.GeneralStats', ([], {}), '()\n', (193, 195), True, 'import GeneralStats as gs\n'), ((208, 286), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (216, 286), True, 'import numpy as np\n'), ((295, 320), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (303, 320), True, 'import numpy as np\n'), ((531, 609), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (539, 609), True, 'import numpy as np\n'), ((618, 643), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (626, 643), True, 'import numpy as np\n'), ((790, 868), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (798, 868), True, 'import numpy as np\n'), ((877, 902), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (885, 902), True, 'import numpy as np\n'), ((1045, 1123), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (1053, 1123), True, 'import numpy as np\n'), ((1132, 1157), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1140, 1157), True, 'import numpy as np\n'), ((2074, 2152), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2082, 2152), True, 'import numpy as np\n'), ((2161, 2186), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2169, 2186), True, 'import numpy as np\n'), ((2329, 2407), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2337, 2407), True, 'import numpy as np\n'), ((2416, 2441), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2424, 2441), True, 'import numpy as np\n'), ((2590, 2668), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2598, 2668), True, 'import numpy as np\n'), ((2677, 2702), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2685, 2702), True, 'import numpy as np\n'), ((2861, 2939), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2869, 2939), True, 'import numpy as np\n'), ((2948, 2973), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2956, 2973), True, 'import numpy as np\n'), ((3323, 3401), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (3331, 3401), True, 'import numpy as np\n'), ((3410, 3444), 'numpy.array', 'np.array', (['[53, 61, 49, 66, 78, 47]'], {}), '([53, 61, 49, 66, 78, 47])\n', (3418, 3444), True, 'import numpy as np\n'), ((3597, 
3615), 'pandas.Series', 'pd.Series', (['data[0]'], {}), '(data[0])\n', (3606, 3615), True, 'import pandas as pd\n'), ((3628, 3646), 'pandas.Series', 'pd.Series', (['data[1]'], {}), '(data[1])\n', (3637, 3646), True, 'import pandas as pd\n'), ((3659, 3677), 'pandas.Series', 'pd.Series', (['data[2]'], {}), '(data[2])\n', (3668, 3677), True, 'import pandas as pd\n'), ((3690, 3708), 'pandas.Series', 'pd.Series', (['data[3]'], {}), '(data[3])\n', (3699, 3708), True, 'import pandas as pd\n'), ((3820, 3836), 'pandas.Series', 'pd.Series', (['data1'], {}), '(data1)\n', (3829, 3836), True, 'import pandas as pd\n'), ((3251, 3262), 'scipy.stats.skew', 'skew', (['data1'], {}), '(data1)\n', (3255, 3262), False, 'from scipy.stats import skew\n'), ((3129, 3142), 'scipy.stats.skew', 'skew', (['data[0]'], {}), '(data[0])\n', (3133, 3142), False, 'from scipy.stats import skew\n'), ((3143, 3156), 'scipy.stats.skew', 'skew', (['data[1]'], {}), '(data[1])\n', (3147, 3156), False, 'from scipy.stats import skew\n'), ((3157, 3170), 'scipy.stats.skew', 'skew', (['data[2]'], {}), '(data[2])\n', (3161, 3170), False, 'from scipy.stats import skew\n'), ((3171, 3184), 'scipy.stats.skew', 'skew', (['data[3]'], {}), '(data[3])\n', (3175, 3184), False, 'from scipy.stats import skew\n')] |
"""
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
]
| [((146, 167), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (160, 167), False, 'import matplotlib\n'), ((207, 241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)', 'dpi': '(72)'}), '(figsize=(5, 4), dpi=72)\n', (217, 241), True, 'import matplotlib.pyplot as plt\n'), ((291, 313), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(200)'], {}), '(0, 2, 200)\n', (302, 313), True, 'import numpy as np\n'), ((318, 339), 'numpy.sin', 'np.sin', (['(2 * np.pi * X)'], {}), '(2 * np.pi * X)\n', (324, 339), True, 'import numpy as np\n'), ((336, 356), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'lw': '(2)'}), '(X, Y, lw=2)\n', (344, 356), True, 'import matplotlib.pyplot as plt\n'), ((357, 376), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (365, 376), True, 'import matplotlib.pyplot as plt\n'), ((377, 387), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((389, 399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (397, 399), True, 'import matplotlib.pyplot as plt\n')] |
import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
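        # Examples, derived from the patterns above:
        #   "file:///tmp/data"  -> ("posix", "/tmp/data", True)
        #   "hdfs://host/path"  -> ("hdfs", "hdfs://host/path", True)
        #   "/tmp/data"         -> ("posix", "/tmp/data", False)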
if None is not path:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
| [
"os.path.join",
"pfio.io.create_fs_handler",
"re.compile"
]
| [((409, 446), 're.compile', 're.compile', (['"""file:\\\\/\\\\/(?P<path>.+)"""'], {}), "('file:\\\\/\\\\/(?P<path>.+)')\n", (419, 446), False, 'import re\n'), ((474, 511), 're.compile', 're.compile', (['"""(?P<path>hdfs:\\\\/\\\\/.+)"""'], {}), "('(?P<path>hdfs:\\\\/\\\\/.+)')\n", (484, 511), False, 'import re\n'), ((1531, 1557), 'pfio.io.create_fs_handler', 'create_fs_handler', (['fs_type'], {}), '(fs_type)\n', (1548, 1557), False, 'from pfio.io import IO, create_fs_handler\n'), ((3182, 3222), 'os.path.join', 'os.path.join', (['self._root', 'formatted_path'], {}), '(self._root, formatted_path)\n', (3194, 3222), False, 'import os\n'), ((3549, 3589), 'os.path.join', 'os.path.join', (['self._root', 'formatted_path'], {}), '(self._root, formatted_path)\n', (3561, 3589), False, 'import os\n'), ((1800, 1838), 'pfio.io.create_fs_handler', 'create_fs_handler', (['uri_or_handler_name'], {}), '(uri_or_handler_name)\n', (1817, 1838), False, 'from pfio.io import IO, create_fs_handler\n')] |
from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
}
def cmd_handler(cmdline):
global binary_info
global js_scanner
global bindata_scanner
global paratext_scanner
global _platform
ty = binary_info["type"]
parser = binary_info["p"]
s_cmd = cmdline.split(" ")
cmd = s_cmd[0]
arg = s_cmd[1:]
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
print(">> "+cmdline)
if cmd == "help":
print("> tree")
print(" Print the structure of target Binary")
print("> dump [binary_name] [directory]")
print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)")
print("> show-hex [binary_name]")
print(" Print hexcidecimal view of specific OLE or Zipped Binary")
print("> scan")
print(" re-scanning the target file")
print("> exit")
print(" quit command liner")
return 1
elif cmd == "clear":
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
return 0
elif cmd == "tree":
if ty == "hwp":
parser.ole_container.print_dir_entry_all()
else:
for file in parser.filelist:
print(file.filename)
return 0
elif cmd == "dump":
if len(arg) > 1:
binary_name, target_dir = arg[0], arg[1]
else:
binary_name, target_dir = arg[0], None
if not target_dir:
target_dir = os.getcwd()
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
targ = ""
for file in parser.filelist:
fname = file.filename.split("/")[-1]
if fname == binary_name:
targ = file.filename
break
if not targ:
print("no file exist")
return 0
stream = parser.read(targ)
with open(target_dir+"/"+binary_name, "wb") as f:
f.write(stream)
print("dump succeed..")
return 1
elif cmd == "show-hex":
binary_name = arg[0]
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
stream = parser.read(binary_name)
print_hexdump(stream)
return 1
elif cmd == "scan":
if ty == "hwp":
bindata_scanner.scan()
js_scanner.scan()
else:
paratext_scanner.scan()
return 1
elif cmd == "exit":
return -1
else:
print("unknown command..")
return 0
print()
class HWPScanner:
def __init__(self) -> None:
self.__platform__ = platform.platform()
self.hwpx_flag = False
self.ole_parser = OleParser()
self.hwp_parser = None
pass
def parse_hwpdoc(self, file_name):
self.file_name = file_name
self.ole_parser.read_ole_binary(file_name)
try:
self.ole_parser.parse()
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def parse_hwpdoc(self):
try:
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def setup_scanner(self):
if not self.hwpx_flag:
self.js_scanner = JS_Scanner(self.hwp_parser)
self.bindata_scanner = BinData_Scanner(self.hwp_parser)
else:
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs)
def get_file_structure(self):
strt = {}
if not self.hwpx_flag:
self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0)
else:
for _file in self.hwpx_docs.filelist:
_path = os.path.split( _file.filename)
if _path[0] not in strt:
# root
if _path[0]:
strt[_path[0]] = {}
else:
strt[_path[1]] = _file.file_size
continue
cur_strt = strt[_path[0]]
for path in _path:
if path not in strt:
if path == _path[-1]:
cur_strt[path] = _file.file_size
else:
cur_strt[path] = {}
cur_strt = cur_strt[path]
else:
cur_strt = strt[path]
return strt
def scan(self):
scan_result = ""
if not self.hwpx_flag:
scan_result += self.js_scanner.scan()
scan_result += self.bindata_scanner.scan()
else:
scan_result += self.paratext_scanner.scan()
return scan_result | [
"zipfile.ZipFile",
"scan.paratext_scanner.ParaText_Scanner",
"scan.bindata_scanner.BinData_Scanner",
"platform.platform",
"pyparser.oleparser.OleParser",
"os.path.split",
"scan.init_scan.init_hwp5_scan",
"scan.jscript_scanner.JS_Scanner",
"os.getcwd",
"utils.dumphex.print_hexdump",
"os.system",
"pyparser.hwp_parser.HwpParser"
]
| [((957, 973), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (966, 973), False, 'import os\n'), ((992, 1010), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1001, 1010), False, 'import os\n'), ((3501, 3520), 'platform.platform', 'platform.platform', ([], {}), '()\n', (3518, 3520), False, 'import platform\n'), ((3578, 3589), 'pyparser.oleparser.OleParser', 'OleParser', ([], {}), '()\n', (3587, 3589), False, 'from pyparser.oleparser import OleParser\n'), ((3839, 3865), 'pyparser.hwp_parser.HwpParser', 'HwpParser', (['self.ole_parser'], {}), '(self.ole_parser)\n', (3848, 3865), False, 'from pyparser.hwp_parser import HwpParser\n'), ((4602, 4629), 'scan.jscript_scanner.JS_Scanner', 'JS_Scanner', (['self.hwp_parser'], {}), '(self.hwp_parser)\n', (4612, 4629), False, 'from scan.jscript_scanner import JS_Scanner\n'), ((4665, 4697), 'scan.bindata_scanner.BinData_Scanner', 'BinData_Scanner', (['self.hwp_parser'], {}), '(self.hwp_parser)\n', (4680, 4697), False, 'from scan.bindata_scanner import BinData_Scanner\n'), ((4748, 4780), 'scan.paratext_scanner.ParaText_Scanner', 'ParaText_Scanner', (['self.hwpx_docs'], {}), '(self.hwpx_docs)\n', (4764, 4780), False, 'from scan.paratext_scanner import ParaText_Scanner\n'), ((1657, 1673), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1666, 1673), False, 'import os\n'), ((1700, 1718), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1709, 1718), False, 'import os\n'), ((3921, 3963), 'scan.init_scan.init_hwp5_scan', 'init_hwp5_scan', (['self.hwp_parser.hwp_header'], {}), '(self.hwp_parser.hwp_header)\n', (3935, 3963), False, 'from scan.init_scan import init_hwp5_scan\n'), ((4035, 4071), 'zipfile.ZipFile', 'zipfile.ZipFile', (['self.file_name', '"""r"""'], {}), "(self.file_name, 'r')\n", (4050, 4071), False, 'import zipfile\n'), ((5027, 5056), 'os.path.split', 'os.path.split', (['_file.filename'], {}), '(_file.filename)\n', (5040, 5056), False, 'import os\n'), ((2178, 2189), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2187, 2189), False, 'import os\n'), ((3081, 3102), 'utils.dumphex.print_hexdump', 'print_hexdump', (['stream'], {}), '(stream)\n', (3094, 3102), False, 'from utils.dumphex import print_hexdump\n')] |
"""
Tests for plugins in core module.
Only unit tests for now.
"""
from unittest.mock import patch
import click
from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
def test_skip_click_exit():
def dummy_method(a, b):
return a + b
dummy_result = dummy_method(1, 2)
decorated = skip_click_exit(dummy_method)
decorated_result = decorated(1, 2)
assert callable(decorated)
assert dummy_result == decorated_result
def test_get_installed_plugins():
class Dummy:
value = "nile.core.plugins.get_installed_plugins"
name = "get_installed_plugins"
with patch("nile.core.plugins.entry_points", return_value=[Dummy()]):
installed_plugins = get_installed_plugins()
assert "get_installed_plugins" in installed_plugins
def test_load_plugins():
@click.group()
def cli():
"""Nile CLI group."""
pass
def dummy():
print("dummy_result")
with patch(
"nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy}
):
app = load_plugins(cli)
assert callable(app)
| [
"nile.core.plugins.get_installed_plugins",
"click.group",
"nile.core.plugins.skip_click_exit",
"nile.core.plugins.load_plugins",
"unittest.mock.patch"
]
| [((333, 362), 'nile.core.plugins.skip_click_exit', 'skip_click_exit', (['dummy_method'], {}), '(dummy_method)\n', (348, 362), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n'), ((846, 859), 'click.group', 'click.group', ([], {}), '()\n', (857, 859), False, 'import click\n'), ((730, 753), 'nile.core.plugins.get_installed_plugins', 'get_installed_plugins', ([], {}), '()\n', (751, 753), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n'), ((976, 1055), 'unittest.mock.patch', 'patch', (['"""nile.core.plugins.get_installed_plugins"""'], {'return_value': "{'dummy': dummy}"}), "('nile.core.plugins.get_installed_plugins', return_value={'dummy': dummy})\n", (981, 1055), False, 'from unittest.mock import patch\n'), ((1085, 1102), 'nile.core.plugins.load_plugins', 'load_plugins', (['cli'], {}), '(cli)\n', (1097, 1102), False, 'from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit\n')] |
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', '<EMAIL>', "test")
self.client.login(username="test", password="<PASSWORD>")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
| [
"django.test.client.Client",
"model_mommy.mommy.make",
"django.contrib.contenttypes.models.ContentType.objects.get",
"users.models.Lageruser.objects.create_superuser"
]
| [((306, 314), 'django.test.client.Client', 'Client', ([], {}), '()\n', (312, 314), False, 'from django.test.client import Client\n'), ((336, 397), 'users.models.Lageruser.objects.create_superuser', 'Lageruser.objects.create_superuser', (['"""test"""', '"""<EMAIL>"""', '"""test"""'], {}), "('test', '<EMAIL>', 'test')\n", (370, 397), False, 'from users.models import Lageruser\n'), ((658, 697), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'model': '"""device"""'}), "(model='device')\n", (681, 697), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((715, 733), 'model_mommy.mommy.make', 'mommy.make', (['Device'], {}), '(Device)\n', (725, 733), False, 'from model_mommy import mommy\n'), ((921, 939), 'model_mommy.mommy.make', 'mommy.make', (['Device'], {}), '(Device)\n', (931, 939), False, 'from model_mommy import mommy\n')] |
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from scitbx import matrix
import math
from libtbx import adopt_init_args
import scitbx.lbfgs
from mmtbx.bulk_solvent import kbu_refinery
from cctbx import maptbx
import mmtbx.masks
import boost_adaptbx.boost.python as bp
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
from libtbx.test_utils import approx_equal
from mmtbx import masks
from cctbx.masks import vdw_radii_from_xray_structure
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")
APPLY_SCALE_K1_TO_FOBS = False
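# Centered moving average with half-window n (up to 2*n+1 samples, truncated at the
# ends); used in refinery.update_F below to smooth per-bin k_mask estimates.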
def moving_average(x, n):
r = []
for i, xi in enumerate(x):
s = 0
cntr = 0
for j in range(max(0,i-n), min(i+n+1, len(x))):
s+=x[j]
cntr+=1
s = s/cntr
r.append(s)
return r
# Utilities used by algorithm 2 ------------------------------------------------
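# Algorithm 2 refines one real scale k_n per mask region by least-squares fitting
# I_model = |sum_n k_n * F_n|^2 against I_obs; the commented-out Python reference
# implementation of the target and gradients is kept inside tg.update_target_and_grads.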
class minimizer(object):
def __init__(self, max_iterations, calculator):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.cntr=0
exception_handling_params = scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound=True,
)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
exception_handling_params=exception_handling_params,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations))
def compute_functional_and_gradients(self):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
t = self.calculator.target()
g = self.calculator.gradients()
#print "step: %4d"%self.cntr, "target:", t, "params:", \
# " ".join(["%10.6f"%i for i in self.x]), math.log(t)
return t,g
class minimizer2(object):
def __init__(self, calculator, min_iterations=0, max_iterations=2000):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.n = self.x.size()
self.cntr=0
def run(self, use_curvatures=0):
self.minimizer = kbu_refinery.lbfgs_run(
target_evaluator=self,
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
use_curvatures=use_curvatures)
self(requests_f_and_g=True, requests_diag=False)
return self
def __call__(self, requests_f_and_g, requests_diag):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
if (not requests_f_and_g and not requests_diag):
requests_f_and_g = True
requests_diag = True
if (requests_f_and_g):
self.f = self.calculator.target()
self.g = self.calculator.gradients()
self.d = None
if (requests_diag):
self.d = self.calculator.curvatures()
#assert self.d.all_ne(0)
if(self.d.all_eq(0)): self.d=None
else:
self.d = 1 / self.d
#print "step: %4d"%self.cntr, "target:", self.f, "params:", \
# " ".join(["%10.6f"%i for i in self.x]) #, math.log(self.f)
return self.x, self.f, self.g, self.d
class tg(object):
def __init__(self, x, i_obs, F, use_curvatures):
self.x = x
self.i_obs = i_obs
self.F = F
self.t = None
self.g = None
self.d = None
    # Needed to do sums from small to large to prevent loss of precision
s = flex.sort_permutation(self.i_obs.data())
self.i_obs = self.i_obs.select(s)
self.F = [f.select(s) for f in self.F]
#
self.sum_i_obs = flex.sum(self.i_obs.data()) # needed for Python version
self.use_curvatures=use_curvatures
self.tgo = mosaic_ext.alg2_tg(
F = [f.data() for f in self.F],
i_obs = self.i_obs.data())
self.update_target_and_grads(x=x)
def update(self, x):
self.update_target_and_grads(x = x)
def update_target_and_grads(self, x):
self.x = x
self.tgo.update(self.x)
self.t = self.tgo.target()
self.g = self.tgo.gradient()
#
# Reference implementation in Python
# s = 1 #180/math.pi
# i_model = flex.double(self.i_obs.data().size(),0)
# for n, kn in enumerate(self.x):
# for m, km in enumerate(self.x):
# tmp = self.F[n].data()*flex.conj(self.F[m].data())
# i_model += kn*km*flex.real(tmp)
# #pn = self.F[n].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fn = flex.abs(self.F[n].data())
# #Fm = flex.abs(self.F[m].data())
# #i_model += kn*km*Fn*Fm*flex.cos(pn-pm)
# diff = i_model - self.i_obs.data()
# #print (flex.min(diff), flex.max(diff))
# t = flex.sum(diff*diff)/4
# #
# g = flex.double()
# for j in range(len(self.F)):
# tmp = flex.double(self.i_obs.data().size(),0)
# for m, km in enumerate(self.x):
# tmp += km * flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
# #pj = self.F[j].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fj = flex.abs(self.F[j].data())
# #Fm = flex.abs(self.F[m].data())
# #tmp += km * Fj*Fm*flex.cos(pj-pm)
# g.append(flex.sum(diff*tmp))
# self.t = t/self.sum_i_obs
# self.g = g/self.sum_i_obs
# #print (self.t,t1)
# #print (list(self.g))
# #print (list(g1))
# #print ()
# #assert approx_equal(self.t, t1, 5)
# #assert approx_equal(self.g, g1, 1.e-6)
#
if self.use_curvatures:
d = flex.double()
for j in range(len(self.F)):
tmp1 = flex.double(self.i_obs.data().size(),0)
tmp2 = flex.double(self.i_obs.data().size(),0)
for m, km in enumerate(self.x):
zz = flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
tmp1 += km * zz
tmp2 += zz
#pj = self.F[j].phases().data()*s
#pm = self.F[m].phases().data()*s
#Fj = flex.abs(self.F[j].data())
#Fm = flex.abs(self.F[m].data())
#tmp += km * Fj*Fm*flex.cos(pj-pm)
d.append(flex.sum(tmp1*tmp1 + tmp2))
self.d=d
def target(self): return self.t
def gradients(self): return self.g
def gradient(self): return self.gradients()
def curvatures(self): return self.d/self.sum_i_obs
#-------------------------------------------------------------------------------
def write_map_file(crystal_symmetry, map_data, file_name):
from iotbx import mrcfile
mrcfile.write_ccp4_map(
file_name = file_name,
unit_cell = crystal_symmetry.unit_cell(),
space_group = crystal_symmetry.space_group(),
map_data = map_data,
labels = flex.std_string([""]))
class refinery(object):
def __init__(self, fmodel, fv, alg, anomaly=True, log = sys.stdout):
assert alg in ["alg0", "alg2", "alg4", None]
self.log = log
self.f_obs = fmodel.f_obs()
self.r_free_flags = fmodel.r_free_flags()
k_mask_overall = fmodel.k_masks()[0]
self.bin_selections = fmodel.bin_selections
#
k_total = fmodel.k_total()
self.f_calc = fmodel.f_model()
    self.F = [self.f_calc.deep_copy()] + list(fv.keys())  # list() so this also works with Python 3 dict views
#
n_zones_start = len(self.F)
r4_start = fmodel.r_work4()
for it in range(5):
#
if(it>0):
r4 = self.fmodel.r_work4()
print(r4_start, r4, abs(round(r4-r4_start,4)))
if(abs(round(r4-r4_start,4))<1.e-4):
break
r4_start = r4
#if(it>0 and n_zones_start == len(self.F)): break
#
#if it>0:
# self.F = [self.fmodel.f_model().deep_copy()] + self.F[1:]
self._print("cycle: %2d"%it)
self._print(" volumes: "+" ".join([str(fv[f]) for f in self.F[1:]]))
f_obs = self.f_obs.deep_copy()
if it==0: k_total = fmodel.k_total()
else: k_total = self.fmodel.k_total()
i_obs = f_obs.customized_copy(data = f_obs.data()*f_obs.data())
K_MASKS = OrderedDict()
self.bin_selections = self.f_obs.log_binning(
n_reflections_in_lowest_resolution_bin = 100*len(self.F))
for i_bin, sel in enumerate(self.bin_selections):
d_max, d_min = f_obs.select(sel).d_max_min()
if d_max<3: continue
bin = " bin %2d: %5.2f-%-5.2f: "%(i_bin, d_max, d_min)
F = [f.select(sel) for f in self.F]
k_total_sel = k_total.select(sel)
F_scaled = [F[0].deep_copy()]+[f.customized_copy(data=f.data()*k_total_sel) for f in F[1:]]
#
# XXX WHY NOT THIS INSTEAD (INVESTIGATE LATER)?
#F_scaled = [f.customized_copy(data=f.data()*k_total_sel) for f in F]
#r00=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, F[0].data()*k_total_sel)
# algorithm_0
if(alg=="alg0"):
k_masks = algorithm_0(
f_obs = f_obs.select(sel),
F = F_scaled,
kt=k_total_sel)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r0=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_4
if(alg=="alg4"):
if it==0: phase_source = fmodel.f_model().select(sel)
else: phase_source = self.fmodel.f_model().select(sel)
k_masks = algorithm_4(
f_obs = self.f_obs.select(sel),
F = F_scaled,
auto_converge_eps = 0.0001,
phase_source = phase_source)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r4=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_2
if(alg=="alg2"):
k_masks = algorithm_2(
i_obs = i_obs.select(sel),
F = F_scaled,
x = self._get_x_init(i_bin),
use_curvatures = False)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r2=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
#self._print(bin+" ".join(["%6.2f"%k for k in k_masks])+" %6.4f %6.4f %6.4f %6.4f"%(r00,r0,r4, r2))
k_mean = flex.mean(k_mask_overall.select(sel))
k_masks_plus = [k_masks[0]]+[k_mean + k for k in k_masks[1:]]
self._print(bin+" ".join(["%6.2f"%k for k in k_masks_plus]) )
K_MASKS[sel] = [k_masks, k_masks_plus]
#
if(len(self.F)==2): break # stop and fall back onto using largest mask
#
#
#print()
#self.update_k_masks(K_MASKS)
#for k_masks in K_MASKS.values():
# self._print(bin+" ".join(["%6.2f"%k for k in k_masks]))
#
f_calc_data = self.f_calc.data().deep_copy()
f_bulk_data = flex.complex_double(fmodel.f_calc().data().size(), 0)
for sel, k_masks in zip(K_MASKS.keys(), K_MASKS.values()):
k_masks = k_masks[0] # 1 is shifted!
f_bulk_data_ = flex.complex_double(sel.count(True), 0)
for i_mask, k_mask in enumerate(k_masks):
if i_mask==0:
f_calc_data = f_calc_data.set_selected(sel,
f_calc_data.select(sel)*k_mask)
continue
f_bulk_data_ += self.F[i_mask].data().select(sel)*k_mask
f_bulk_data = f_bulk_data.set_selected(sel,f_bulk_data_)
#
self.update_F(K_MASKS)
f_bulk = fmodel.f_calc().customized_copy(data = f_bulk_data)
if(len(self.F)==2):
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
f_calc = fmodel.f_calc(),
f_mask = self.F[1],
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
else:
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.f_calc,
bin_selections = self.bin_selections,
f_mask = f_bulk,
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
#
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.fmodel.f_calc(),
f_mask = self.fmodel.f_bulk(),
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self._print(self.fmodel.r_factors(prefix=" "))
#self._print(self.fmodel.r_factors(prefix=" "))
self.mc = self.fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
#def update_k_masks(self, K_MASKS):
# tmp = []
# for i_mask, F in enumerate(self.F):
# k_masks = [k_masks_bin[i_mask] for k_masks_bin in K_MASKS.values()]
# found = False
# for i_bin, k_masks_bin in enumerate(K_MASKS.values()):
# if(not found and k_masks_bin[i_mask]<=0.009):
# found = True
# K_MASKS.values()[i_bin][i_mask]=0
# elif found:
# K_MASKS.values()[i_bin][i_mask]=0
def _print(self, m):
if(self.log is not None):
print(m, file=self.log)
def update_F(self, K_MASKS):
tmp = []
for i_mask, F in enumerate(self.F):
k_masks = [k_masks_bin[1][i_mask] for k_masks_bin in K_MASKS.values()]
if(i_mask == 0): tmp.append(self.F[0])
elif moving_average(k_masks,2)[0]>=0.03: tmp.append(F)
self.F = tmp[:]
def _get_x_init(self, i_bin):
return flex.double([1] + [1]*len(self.F[1:]))
#k_maks1_init = 0.35 - i_bin*0.35/len(self.bin_selections)
#x = flex.double([1,k_maks1_init])
#x.extend( flex.double(len(self.F)-2, 0.1))
#return x
def get_f_mask(xrs, ma, step, option = 2, r_shrink = None, r_sol = None):
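  # 'option' selects between several alternative ways of computing the bulk-solvent
  # mask structure factors (atom_mask on a grid-step factor or an explicit grid, a P1
  # mask folded through the asymmetric map, masks.bulk_solvent, or masks.manager).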
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
n_real = crystal_gridding.n_real()
atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs)
mask_params = masks.mask_master_params.extract()
grid_step_factor = ma.d_min()/step
if(r_shrink is not None): mask_params.shrink_truncation_radius = r_shrink
if(r_sol is not None): mask_params.solvent_radius = r_sol
mask_params.grid_step_factor = grid_step_factor
# 1
if(option==1):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
group = xrs.space_group(),
resolution = ma.d_min(),
grid_step_factor = grid_step_factor,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 2
elif(option==2):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
space_group = xrs.space_group(),
gridding_n_real = n_real,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 3
elif(option==3):
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xrs,
p1 = True,
for_structure_factors = True,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
n_real = n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
mask = asu_map_ext.asymmetric_map(
xrs.crystal_symmetry().space_group().type(), mask_p1).data()
f_mask = ma.structure_factors_from_asu_map(
asu_map_data = mask, n_real = n_real)
# 4
elif(option==4):
f_mask = masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
grid_step = step,
atom_radii = atom_radii).structure_factors(
miller_set = ma)
elif(option==5):
o = mmtbx.masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
gridding_n_real = n_real,
atom_radii = atom_radii)
assert approx_equal(n_real, o.data.accessor().all())
f_mask = o.structure_factors(ma)
elif(option==6):
# XXX No control over n_real, so results with others don't match
mask_manager = masks.manager(
miller_array = ma,
miller_array_twin = None,
mask_params = mask_params)
f_mask = mask_manager.shell_f_masks(xray_structure=xrs, force_update=True)[0]
else: assert 0
#
return f_mask
def filter_mask(mask_p1, volume_cutoff, crystal_symmetry,
for_structure_factors = False):
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = True,
wrapping = True)
mi, ma = flex.min(mask_p1), flex.max(mask_p1)
print (mask_p1.size(), (mask_p1<0).count(True))
assert mi == 0, mi
assert ma == 1, ma
a,b,c = crystal_symmetry.unit_cell().parameters()[:3]
na,nb,nc = mask_p1.accessor().all()
step = flex.mean(flex.double([a/na, b/nb, c/nc]))
if(crystal_symmetry.space_group_number() != 1):
co.merge_symmetry_related_regions(space_group=crystal_symmetry.space_group())
conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
if(i==0): continue # skip macromolecule
# skip small volume
volume = v*step**3
if volume < volume_cutoff:
conn = conn.set_selected(conn==i, 0)
conn = conn.set_selected(conn>0, 1)
if for_structure_factors:
conn = conn / crystal_symmetry.space_group().order_z()
return conn
class mosaic_f_mask(object):
def __init__(self,
xray_structure,
step,
volume_cutoff=None,
mean_diff_map_threshold=None,
compute_whole=False,
preprocess_against_shallow=True,
largest_only=False,
wrapping=True,
f_obs=None,
r_sol=1.1,
r_shrink=0.9,
f_calc=None,
log = None,
write_masks=False):
adopt_init_args(self, locals())
#
self.dsel = f_obs.d_spacings().data()>=0 # XXX WHY????????????
self.miller_array = f_obs.select(self.dsel)
#
# To avoid "Miller index not in structure factor map" crash
step = min(step, self.miller_array.d_min()/3)
#
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
# compute mask in p1 (via ASU)
self.crystal_gridding = maptbx.crystal_gridding(
unit_cell = xray_structure.unit_cell(),
space_group_info = xray_structure.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
self.n_real = self.crystal_gridding.n_real()
# XXX Where do we want to deal with H and occ==0?
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xray_structure,
p1 = True,
for_structure_factors = True,
solvent_radius = r_sol,
shrink_truncation_radius = r_shrink,
n_real = self.n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
self.f_mask_whole = None
if(compute_whole):
mask = asu_map_ext.asymmetric_map(
xray_structure.crystal_symmetry().space_group().type(), mask_p1).data()
self.f_mask_whole = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask, n_real = self.n_real)
self.solvent_content = 100.*mask_p1.count(1)/mask_p1.size()
if(write_masks):
write_map_file(crystal_symmetry=xray_structure.crystal_symmetry(),
map_data=mask_p1, file_name="mask_whole.mrc")
# conn analysis
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = preprocess_against_shallow,
wrapping = wrapping)
co.merge_symmetry_related_regions(space_group=xray_structure.space_group())
del mask_p1
self.conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
#
f_mask_data_0 = flex.complex_double(f_obs.data().size(), 0)
f_mask_data = flex.complex_double(f_obs.data().size(), 0)
self.FV = OrderedDict()
self.mc = None
diff_map = None
mean_diff_map = None
self.regions = OrderedDict()
self.f_mask_0 = None
self.f_mask = None
#
if(log is not None):
print(" # volume_p1 uc(%) mFo-DFc: min,max,mean,sd", file=log)
#
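    # Regions are visited in order of decreasing volume.  Region 0 (the
    # macromolecule) and regions below volume_cutoff are skipped; regions
    # occupying >= 1% of the unit cell are accumulated into f_mask_data_0.
    # Once a sub-1% region is reached, an mFo-DFc map is computed (if f_calc is
    # available) so that small regions can also be filtered on their mean
    # difference-map value before being added to f_mask_data and self.FV.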
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
# skip macromolecule
if(i==0): continue
# skip small volume
volume = v*step**3
uc_fraction = v*100./self.conn.size()
if(volume_cutoff is not None):
if volume < volume_cutoff: continue
selection = self.conn==i
mask_i_asu = self.compute_i_mask_asu(selection = selection, volume = volume)
volume_asu = (mask_i_asu>0).count(True)*step**3
if(uc_fraction >= 1):
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data_0 += f_mask_i.data()
elif(largest_only): break
if(uc_fraction < 1 and diff_map is None):
diff_map = self.compute_diff_map(f_mask_data = f_mask_data_0)
mi,ma,me,sd = None,None,None,None
if(diff_map is not None):
blob = diff_map.select(selection.iselection())
mean_diff_map = flex.mean(diff_map.select(selection.iselection()))
mi,ma,me = flex.min(blob), flex.max(blob), flex.mean(blob)
sd = blob.sample_standard_deviation()
if(log is not None):
print("%3d"%i_seq,"%12.3f"%volume, "%8.4f"%round(uc_fraction,4),
"%7s"%str(None) if diff_map is None else "%7.3f %7.3f %7.3f %7.3f"%(
mi,ma,me,sd), file=log)
if(mean_diff_map_threshold is not None and
mean_diff_map is not None and mean_diff_map<=mean_diff_map_threshold):
continue
self.regions[i_seq] = group_args(
id = i,
i_seq = i_seq,
volume = volume,
uc_fraction = uc_fraction,
diff_map = group_args(mi=mi, ma=ma, me=me, sd=sd))
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data += f_mask_i.data()
self.FV[f_mask_i] = [round(volume, 3), round(uc_fraction,1)]
#
self.f_mask_0 = f_obs.customized_copy(data = f_mask_data_0)
self.f_mask = f_obs.customized_copy(data = f_mask_data)
self.do_mosaic = False
self.n_regions = len(self.FV.keys())
if(self.n_regions>1):
self.do_mosaic = True
def compute_f_mask_i(self, mask_i_asu):
f_mask_i = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask_i_asu, n_real = self.n_real)
data = flex.complex_double(self.dsel.size(), 0)
data = data.set_selected(self.dsel, f_mask_i.data())
return self.f_obs.set().array(data = data)
def compute_diff_map(self, f_mask_data):
if(self.f_calc is None): return None
f_mask = self.f_obs.customized_copy(data = f_mask_data)
fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
f_calc = self.f_calc,
f_mask = f_mask)
fmodel = fmodel.select(self.dsel)
fmodel.update_all_scales(remove_outliers=True,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self.mc = fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
fft_map = self.mc.fft_map(crystal_gridding = self.crystal_gridding)
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded()
def compute_i_mask_asu(self, selection, volume):
mask_i = flex.double(flex.grid(self.n_real), 0)
mask_i = mask_i.set_selected(selection, 1)
if(self.write_masks):
write_map_file(
crystal_symmetry = self.crystal_symmetry,
map_data = mask_i,
file_name = "mask_%s.mrc"%str(round(volume,3)))
tmp = asu_map_ext.asymmetric_map(
self.crystal_symmetry.space_group().type(), mask_i).data()
return tmp
def algorithm_0(f_obs, F, kt):
"""
Grid search
"""
fc, f_masks = F[0], F[1:]
k_mask_trial_range=[]
s = -1
while s<1:
k_mask_trial_range.append(s)
s+=0.0001
r = []
fc_data = fc.data()
for i, f_mask in enumerate(f_masks):
#print("mask ",i)
assert f_obs.data().size() == fc.data().size()
assert f_mask.data().size() == fc.data().size()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data))
kmask_, k_ = \
bulk_solvent.k_mask_and_k_overall_grid_search(
f_obs.data()*kt,
fc_data*kt,
f_mask.data()*kt,
flex.double(k_mask_trial_range),
flex.bool(fc.data().size(),True))
r.append(kmask_)
fc_data += fc_data*k_ + kmask_*f_mask.data()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_))
r = [1,]+r
return r
def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10):
"""
Unphased one-step search
"""
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures)
for it in range(macro_cycles):
if(use_curvatures):
m = minimizer(max_iterations=100, calculator=calculator)
else:
#upper = flex.double([1.1] + [1]*(x.size()-1))
#lower = flex.double([0.9] + [-1]*(x.size()-1))
upper = flex.double([1.1] + [5]*(x.size()-1))
lower = flex.double([0.9] + [-5]*(x.size()-1))
#upper = flex.double([10] + [5]*(x.size()-1))
#lower = flex.double([0.1] + [-5]*(x.size()-1))
#upper = flex.double([10] + [0.65]*(x.size()-1))
#lower = flex.double([0.1] + [0]*(x.size()-1))
#upper = flex.double([1] + [0.65]*(x.size()-1))
#lower = flex.double([1] + [0]*(x.size()-1))
#upper = flex.double([1] + [5.65]*(x.size()-1))
#lower = flex.double([1] + [-5]*(x.size()-1))
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
initial_values = x).run()
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
if(use_curvatures):
for it in range(10):
m = minimizer(max_iterations=100, calculator=calculator)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
return m.x
def algorithm_3(i_obs, fc, f_masks):
"""
Unphased two-step search
"""
F = [fc]+f_masks
Gnm = []
cs = {}
cntr=0
nm=[]
# Compute and store Gnm
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
if m < n:
continue
Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) )
cs[(n,m)] = cntr
cntr+=1
nm.append((n,m))
# Keep track of indices for "upper triangular matrix vs full"
for k,v in zip(list(cs.keys()), list(cs.values())):
i,j=k
if i==j: continue
else: cs[(j,i)]=v
# Generate and solve system Ax=b, x = A_1*b
A = []
b = []
for u, Gnm_u in enumerate(Gnm):
for v, Gnm_v in enumerate(Gnm):
scale = 2
n,m=nm[v]
if n==m: scale=1
A.append( flex.sum(Gnm_u*Gnm_v)*scale )
b.append( flex.sum(Gnm_u * i_obs.data()) )
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
# Expand Xmn from solution x
Xmn = []
for n, Fn in enumerate(F):
rows = []
for m, Fm in enumerate(F):
x_ = x[cs[(n,m)]]
rows.append(x_)
Xmn.append(rows)
# Do formula (19)
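  # As implemented below, with N = len(F):
  #   ln K_j = (1/N) * ( sum_m ln X_jm  -  (1/(2N)) * sum_{n,m} ln X_nm )
  # and the returned scales are K_j = exp(ln K_j).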
lnK = []
for j, Fj in enumerate(F):
t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) )
t2 = 0
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
t2 += math.log(Xmn[n][m])
t2 = t2 / (2*len(F))
lnK.append( 1/len(F)*(t1-t2) )
return [math.exp(x) for x in lnK]
def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7,
use_cpp=True):
"""
Phased simultaneous search (alg4)
"""
fc, f_masks = F[0], F[1:]
fc = fc.deep_copy()
F = [fc]+F[1:]
# C++ version
if(use_cpp):
return mosaic_ext.alg4(
[f.data() for f in F],
f_obs.data(),
phase_source.data(),
max_cycles,
auto_converge_eps)
# Python version (1.2-3 times slower, but much more readable!)
cntr = 0
x_prev = None
while True:
f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source)
A = []
b = []
for j, Fj in enumerate(F):
A_rows = []
for n, Fn in enumerate(F):
Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) )
A_rows.append( flex.sum(Gjn) )
Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) )
b.append(flex.sum(Hj))
A.extend(A_rows)
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
#
fc_d = flex.complex_double(phase_source.indices().size(), 0)
for i, f in enumerate(F):
fc_d += f.data()*x[i]
phase_source = phase_source.customized_copy(data = fc_d)
x_ = x[:]
#
cntr+=1
if(cntr>max_cycles): break
if(x_prev is None): x_prev = x_[:]
else:
max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_)))
if(max_diff<=auto_converge_eps): break
x_prev = x_[:]
return x_
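# Minimal usage sketch (hypothetical inputs; f_obs, f_calc and the region
# f_masks are assumed to be miller arrays on a common set of indices, e.g. as
# produced by mosaic_f_mask above):
#   scales = algorithm_4(f_obs, [f_calc] + f_masks, phase_source=f_calc)
#   # scales[0] multiplies f_calc, scales[1:] the individual region masks.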
| [
"mmtbx.masks.bulk_solvent",
"cctbx.array_family.flex.grid",
"math.log",
"math.exp",
"scitbx.matrix.sqr",
"cctbx.array_family.flex.min",
"cctbx.array_family.flex.double",
"cctbx.array_family.flex.std_string",
"mmtbx.bulk_solvent.kbu_refinery.lbfgs_run",
"libtbx.group_args",
"collections.OrderedDict",
"scitbx.matrix.col",
"cctbx.maptbx.connectivity",
"mmtbx.masks.manager",
"cctbx.masks.vdw_radii_from_xray_structure",
"mmtbx.ncs.tncs.minimizer",
"cctbx.array_family.flex.mean",
"mmtbx.masks.mask_master_params.extract",
"cctbx.maptbx.unpad_in_place",
"cctbx.array_family.flex.sum",
"boost_adaptbx.boost.python.import_ext",
"cctbx.array_family.flex.max"
]
| [((336, 377), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""cctbx_asymmetric_map_ext"""'], {}), "('cctbx_asymmetric_map_ext')\n", (349, 377), True, 'import boost_adaptbx.boost.python as bp\n'), ((662, 694), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""mmtbx_masks_ext"""'], {}), "('mmtbx_masks_ext')\n", (675, 694), True, 'import boost_adaptbx.boost.python as bp\n'), ((708, 741), 'boost_adaptbx.boost.python.import_ext', 'bp.import_ext', (['"""mmtbx_mosaic_ext"""'], {}), "('mmtbx_mosaic_ext')\n", (721, 741), True, 'import boost_adaptbx.boost.python as bp\n'), ((14610, 14659), 'cctbx.masks.vdw_radii_from_xray_structure', 'vdw_radii_from_xray_structure', ([], {'xray_structure': 'xrs'}), '(xray_structure=xrs)\n', (14639, 14659), False, 'from cctbx.masks import vdw_radii_from_xray_structure\n'), ((14678, 14712), 'mmtbx.masks.mask_master_params.extract', 'masks.mask_master_params.extract', ([], {}), '()\n', (14710, 14712), False, 'from mmtbx import masks\n'), ((18028, 18133), 'cctbx.maptbx.connectivity', 'maptbx.connectivity', ([], {'map_data': 'mask_p1', 'threshold': '(0.01)', 'preprocess_against_shallow': '(True)', 'wrapping': '(True)'}), '(map_data=mask_p1, threshold=0.01,\n preprocess_against_shallow=True, wrapping=True)\n', (18047, 18133), False, 'from cctbx import maptbx\n'), ((29045, 29058), 'scitbx.matrix.sqr', 'matrix.sqr', (['A'], {}), '(A)\n', (29055, 29058), False, 'from scitbx import matrix\n'), ((29085, 29098), 'scitbx.matrix.col', 'matrix.col', (['b'], {}), '(b)\n', (29095, 29098), False, 'from scitbx import matrix\n'), ((2186, 2344), 'mmtbx.bulk_solvent.kbu_refinery.lbfgs_run', 'kbu_refinery.lbfgs_run', ([], {'target_evaluator': 'self', 'min_iterations': 'self.min_iterations', 'max_iterations': 'self.max_iterations', 'use_curvatures': 'use_curvatures'}), '(target_evaluator=self, min_iterations=self.\n min_iterations, max_iterations=self.max_iterations, use_curvatures=\n use_curvatures)\n', (2208, 2344), False, 'from mmtbx.bulk_solvent import kbu_refinery\n'), ((18219, 18236), 'cctbx.array_family.flex.min', 'flex.min', (['mask_p1'], {}), '(mask_p1)\n', (18227, 18236), False, 'from cctbx.array_family import flex\n'), ((18238, 18255), 'cctbx.array_family.flex.max', 'flex.max', (['mask_p1'], {}), '(mask_p1)\n', (18246, 18255), False, 'from cctbx.array_family import flex\n'), ((18464, 18501), 'cctbx.array_family.flex.double', 'flex.double', (['[a / na, b / nb, c / nc]'], {}), '([a / na, b / nb, c / nc])\n', (18475, 18501), False, 'from cctbx.array_family import flex\n'), ((20753, 20787), 'cctbx.maptbx.unpad_in_place', 'maptbx.unpad_in_place', ([], {'map': 'mask_p1'}), '(map=mask_p1)\n', (20774, 20787), False, 'from cctbx import maptbx\n'), ((21329, 21460), 'cctbx.maptbx.connectivity', 'maptbx.connectivity', ([], {'map_data': 'mask_p1', 'threshold': '(0.01)', 'preprocess_against_shallow': 'preprocess_against_shallow', 'wrapping': 'wrapping'}), '(map_data=mask_p1, threshold=0.01,\n preprocess_against_shallow=preprocess_against_shallow, wrapping=wrapping)\n', (21348, 21460), False, 'from cctbx import maptbx\n'), ((21949, 21962), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21960, 21962), False, 'from collections import OrderedDict\n'), ((22046, 22059), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22057, 22059), False, 'from collections import OrderedDict\n'), ((29588, 29599), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (29596, 29599), False, 'import math\n'), ((30517, 30530), 'scitbx.matrix.sqr', 'matrix.sqr', (['A'], 
{}), '(A)\n', (30527, 30530), False, 'from scitbx import matrix\n'), ((30561, 30574), 'scitbx.matrix.col', 'matrix.col', (['b'], {}), '(b)\n', (30571, 30574), False, 'from scitbx import matrix\n'), ((5390, 5403), 'cctbx.array_family.flex.double', 'flex.double', ([], {}), '()\n', (5401, 5403), False, 'from cctbx.array_family import flex\n'), ((6530, 6551), 'cctbx.array_family.flex.std_string', 'flex.std_string', (["['']"], {}), "([''])\n", (6545, 6551), False, 'from cctbx.array_family import flex\n'), ((7815, 7828), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7826, 7828), False, 'from collections import OrderedDict\n'), ((25382, 25404), 'cctbx.array_family.flex.grid', 'flex.grid', (['self.n_real'], {}), '(self.n_real)\n', (25391, 25404), False, 'from cctbx.array_family import flex\n'), ((26352, 26383), 'cctbx.array_family.flex.double', 'flex.double', (['k_mask_trial_range'], {}), '(k_mask_trial_range)\n', (26363, 26383), False, 'from cctbx.array_family import flex\n'), ((16378, 16412), 'cctbx.maptbx.unpad_in_place', 'maptbx.unpad_in_place', ([], {'map': 'mask_p1'}), '(map=mask_p1)\n', (16399, 16412), False, 'from cctbx import maptbx\n'), ((29385, 29404), 'cctbx.array_family.flex.double', 'flex.double', (['Xmn[j]'], {}), '(Xmn[j])\n', (29396, 29404), False, 'from cctbx.array_family import flex\n'), ((29498, 29517), 'math.log', 'math.log', (['Xmn[n][m]'], {}), '(Xmn[n][m])\n', (29506, 29517), False, 'import math\n'), ((30472, 30484), 'cctbx.array_family.flex.sum', 'flex.sum', (['Hj'], {}), '(Hj)\n', (30480, 30484), False, 'from cctbx.array_family import flex\n'), ((5945, 5973), 'cctbx.array_family.flex.sum', 'flex.sum', (['(tmp1 * tmp1 + tmp2)'], {}), '(tmp1 * tmp1 + tmp2)\n', (5953, 5973), False, 'from cctbx.array_family import flex\n'), ((23178, 23192), 'cctbx.array_family.flex.min', 'flex.min', (['blob'], {}), '(blob)\n', (23186, 23192), False, 'from cctbx.array_family import flex\n'), ((23194, 23208), 'cctbx.array_family.flex.max', 'flex.max', (['blob'], {}), '(blob)\n', (23202, 23208), False, 'from cctbx.array_family import flex\n'), ((23210, 23225), 'cctbx.array_family.flex.mean', 'flex.mean', (['blob'], {}), '(blob)\n', (23219, 23225), False, 'from cctbx.array_family import flex\n'), ((23821, 23859), 'libtbx.group_args', 'group_args', ([], {'mi': 'mi', 'ma': 'ma', 'me': 'me', 'sd': 'sd'}), '(mi=mi, ma=ma, me=me, sd=sd)\n', (23831, 23859), False, 'from libtbx import group_args\n'), ((27568, 27678), 'mmtbx.ncs.tncs.minimizer', 'tncs.minimizer', ([], {'potential': 'calculator', 'use_bounds': '(2)', 'lower_bound': 'lower', 'upper_bound': 'upper', 'initial_values': 'x'}), '(potential=calculator, use_bounds=2, lower_bound=lower,\n upper_bound=upper, initial_values=x)\n', (27582, 27678), False, 'from mmtbx.ncs import tncs\n'), ((28962, 28985), 'cctbx.array_family.flex.sum', 'flex.sum', (['(Gnm_u * Gnm_v)'], {}), '(Gnm_u * Gnm_v)\n', (28970, 28985), False, 'from cctbx.array_family import flex\n'), ((30378, 30391), 'cctbx.array_family.flex.sum', 'flex.sum', (['Gjn'], {}), '(Gjn)\n', (30386, 30391), False, 'from cctbx.array_family import flex\n'), ((30928, 30947), 'cctbx.array_family.flex.double', 'flex.double', (['x_prev'], {}), '(x_prev)\n', (30939, 30947), False, 'from cctbx.array_family import flex\n'), ((30948, 30963), 'cctbx.array_family.flex.double', 'flex.double', (['x_'], {}), '(x_)\n', (30959, 30963), False, 'from cctbx.array_family import flex\n'), ((16649, 16909), 'mmtbx.masks.bulk_solvent', 'masks.bulk_solvent', ([], {'xray_structure': 'xrs', 
'ignore_zero_occupancy_atoms': '(False)', 'solvent_radius': 'mask_params.solvent_radius', 'shrink_truncation_radius': 'mask_params.shrink_truncation_radius', 'ignore_hydrogen_atoms': '(False)', 'grid_step': 'step', 'atom_radii': 'atom_radii'}), '(xray_structure=xrs, ignore_zero_occupancy_atoms=False,\n solvent_radius=mask_params.solvent_radius, shrink_truncation_radius=\n mask_params.shrink_truncation_radius, ignore_hydrogen_atoms=False,\n grid_step=step, atom_radii=atom_radii)\n', (16667, 16909), False, 'from mmtbx import masks\n'), ((17679, 17758), 'mmtbx.masks.manager', 'masks.manager', ([], {'miller_array': 'ma', 'miller_array_twin': 'None', 'mask_params': 'mask_params'}), '(miller_array=ma, miller_array_twin=None, mask_params=mask_params)\n', (17692, 17758), False, 'from mmtbx import masks\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
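        # Each test below builds a lazy mars tensor expression, evaluates it
        # with the numpy-backed executor and compares the result against the
        # equivalent numpy/scipy computation on the raw data.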
def testRechunkExecution(self):
raw = np.random.random((11, 8))
arr = tensor(raw, chunks=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunks=1)
b = tensor([3, -1, 3], chunks=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.astype('i8')))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.T))
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0)))
raw = sps.random(11, 8)
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray()))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0)))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0)))
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunks=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunks=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6))))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunks=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunks=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 30)))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(10, -1)))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1)))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.ravel()))
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4)))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(3000, 10, 2)))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunks=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunks=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
self.assertTrue(np.array_equal(res, expected))
def testArraySplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunks=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunks=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunks=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunks=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunks=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testPtpExecution(self):
x = arange(4, chunks=1).reshape(2, 2)
t = ptp(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
np.testing.assert_equal(res, expected)
t = ptp(x, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
np.testing.assert_equal(res, expected)
t = ptp(x)
res = self.executor.execute_tensor(t)[0]
expected = np.ptp(np.arange(4).reshape(2, 2))
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunks=2)
to_end = tensor([88, 99], chunks=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testDigitizeExecution(self):
data = np.array([0.2, 6.4, 3.0, 1.6])
x = tensor(data, chunks=2)
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
b = tensor(bins, chunks=2)
inds = digitize(x, b)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
data = np.array([1.2, 10.0, 12.4, 15.5, 20.])
x = tensor(data, chunks=2)
bins = np.array([0, 5, 10, 15, 20])
inds = digitize(x, bins, right=True)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=True)
np.testing.assert_equal(res, expected)
inds = digitize(x, bins, right=False)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=False)
np.testing.assert_equal(res, expected)
data = sps.random(10, 1, density=.1) * 12
x = tensor(data, chunks=2)
bins = np.array([1.0, 2.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data.toarray(), bins, right=False)
np.testing.assert_equal(res.toarray(), expected)
def testAverageExecution(self):
data = arange(1, 5, chunks=1)
t = average(data)
res = self.executor.execute_tensor(t)[0]
expected = np.average(np.arange(1, 5))
self.assertEqual(res, expected)
t = average(arange(1, 11, chunks=2), weights=arange(10, 0, -1, chunks=2))
res = self.executor.execute_tensor(t)[0]
expected = np.average(range(1, 11), weights=range(10, 0, -1))
self.assertEqual(res, expected)
data = arange(6, chunks=2).reshape((3, 2))
t = average(data, axis=1, weights=tensor([1./4, 3./4], chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.average(np.arange(6).reshape(3, 2), axis=1, weights=(1./4, 3./4))
np.testing.assert_equal(res, expected)
with self.assertRaises(TypeError):
average(data, weights=tensor([1./4, 3./4], chunks=2))
def testCovExecution(self):
data = np.array([[0, 2], [1, 1], [2, 0]]).T
x = tensor(data, chunks=1)
t = cov(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.cov(data)
np.testing.assert_equal(res, expected)
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
X = stack((x, y), axis=0)
t = cov(x, y)
r = tall(t == cov(X))
self.assertTrue(self.executor.execute_tensor(r)[0])
def testCorrcoefExecution(self):
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
t = corrcoef(x, y)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.corrcoef(data_x, data_y)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
a = arange(8, chunks=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunks=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunks=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunks=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunks=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
element = 2 * arange(4, chunks=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
| [
"mars.tensor.expressions.base.ptp",
"numpy.testing.assert_equal",
"numpy.random.rand",
"mars.tensor.expressions.base.argwhere",
"mars.tensor.expressions.base.moveaxis",
"mars.tensor.expressions.base.copyto",
"mars.tensor.expressions.base.vsplit",
"mars.tensor.expressions.base.average",
"numpy.array",
"mars.tensor.expressions.base.flipud",
"mars.tensor.expressions.base.expand_dims",
"mars.tensor.arange",
"mars.tensor.expressions.base.hsplit",
"numpy.cov",
"numpy.arange",
"mars.tensor.expressions.base.split",
"mars.tensor.expressions.base.roll",
"mars.tensor.expressions.base.atleast_2d",
"mars.tensor.expressions.merge.stack",
"numpy.repeat",
"numpy.random.random",
"numpy.where",
"mars.tensor.expressions.base.rollaxis",
"numpy.diff",
"mars.tensor.expressions.base.corrcoef",
"mars.tensor.expressions.datasource.zeros",
"scipy.sparse.random",
"mars.tensor.expressions.base.ediff1d",
"mars.tensor.expressions.base.flip",
"mars.tensor.expressions.datasource.ones",
"mars.tensor.expressions.datasource.arange",
"mars.tensor.execution.core.Executor",
"numpy.tile",
"numpy.ones",
"mars.tensor.expressions.base.atleast_3d",
"mars.tensor.expressions.base.squeeze",
"numpy.digitize",
"mars.tensor.expressions.base.broadcast_to",
"numpy.corrcoef",
"numpy.ediff1d",
"mars.tensor.expressions.base.broadcast_arrays",
"mars.tensor.expressions.base.transpose",
"numpy.squeeze",
"mars.tensor.expressions.base.tile",
"mars.tensor.expressions.base.isin",
"mars.tensor.expressions.base.repeat",
"numpy.broadcast_arrays",
"numpy.random.randn",
"mars.tensor.expressions.base.where",
"mars.tensor.expressions.base.digitize",
"numpy.broadcast_to",
"mars.tensor.expressions.base.cov",
"numpy.atleast_3d",
"mars.tensor.expressions.base.diff",
"numpy.random.randint",
"numpy.array_equal",
"mars.tensor.expressions.base.atleast_1d",
"numpy.expand_dims",
"mars.tensor.expressions.base.array_split",
"mars.tensor.expressions.base.fliplr",
"mars.tensor.expressions.datasource.tensor",
"mars.tensor.expressions.base.dsplit"
]
| [((1394, 1411), 'mars.tensor.execution.core.Executor', 'Executor', (['"""numpy"""'], {}), "('numpy')\n", (1402, 1411), False, 'from mars.tensor.execution.core import Executor\n'), ((1463, 1488), 'numpy.random.random', 'np.random.random', (['(11, 8)'], {}), '((11, 8))\n', (1479, 1488), True, 'import numpy as np\n'), ((1503, 1524), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (1509, 1524), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2022, 2044), 'mars.tensor.expressions.datasource.ones', 'ones', (['(2, 3)'], {'chunks': '(1)'}), '((2, 3), chunks=1)\n', (2026, 2044), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2057, 2085), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[3, -1, 3]'], {'chunks': '(2)'}), '([3, -1, 3], chunks=2)\n', (2063, 2085), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2095, 2120), 'mars.tensor.expressions.base.copyto', 'copyto', (['a', 'b'], {'where': '(b > 1)'}), '(a, b, where=b > 1)\n', (2101, 2120), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((2203, 2235), 'numpy.array', 'np.array', (['[[3, 1, 3], [3, 1, 3]]'], {}), '([[3, 1, 3], [3, 1, 3]])\n', (2211, 2235), True, 'import numpy as np\n'), ((2245, 2283), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (2268, 2283), True, 'import numpy as np\n'), ((2334, 2359), 'numpy.random.random', 'np.random.random', (['(10, 5)'], {}), '((10, 5))\n', (2350, 2359), True, 'import numpy as np\n'), ((2374, 2395), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (2380, 2395), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2572, 2602), 'scipy.sparse.random', 'sps.random', (['(10)', '(5)'], {'density': '(0.2)'}), '(10, 5, density=0.2)\n', (2582, 2602), True, 'import scipy.sparse as sps\n'), ((2616, 2637), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (2622, 2637), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2872, 2900), 'numpy.random.random', 'np.random.random', (['(11, 8, 5)'], {}), '((11, 8, 5))\n', (2888, 2900), True, 'import numpy as np\n'), ((2915, 2936), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (2921, 2936), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((2952, 2966), 'mars.tensor.expressions.base.transpose', 'transpose', (['arr'], {}), '(arr)\n', (2961, 2966), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((3102, 3135), 'mars.tensor.expressions.base.transpose', 'transpose', (['arr'], {'axes': '(-2, -1, -3)'}), '(arr, axes=(-2, -1, -3))\n', (3111, 3135), False, 'from 
mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((3287, 3304), 'scipy.sparse.random', 'sps.random', (['(11)', '(8)'], {}), '(11, 8)\n', (3297, 3304), True, 'import scipy.sparse as sps\n'), ((3319, 3340), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (3325, 3340), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((3356, 3370), 'mars.tensor.expressions.base.transpose', 'transpose', (['arr'], {}), '(arr)\n', (3365, 3370), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((3604, 3632), 'numpy.random.random', 'np.random.random', (['(11, 8, 5)'], {}), '((11, 8, 5))\n', (3620, 3632), True, 'import numpy as np\n'), ((3647, 3668), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (3653, 3668), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((3850, 3880), 'scipy.sparse.random', 'sps.random', (['(11)', '(8)'], {'density': '(0.2)'}), '(11, 8, density=0.2)\n', (3860, 3880), True, 'import scipy.sparse as sps\n'), ((3894, 3915), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(3)'}), '(raw, chunks=3)\n', (3900, 3915), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((4152, 4178), 'mars.tensor.expressions.datasource.zeros', 'zeros', (['(3, 4, 5)'], {'chunks': '(2)'}), '((3, 4, 5), chunks=2)\n', (4157, 4178), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((4192, 4210), 'mars.tensor.expressions.base.moveaxis', 'moveaxis', (['x', '(0)', '(-1)'], {}), '(x, 0, -1)\n', (4200, 4210), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((4334, 4352), 'mars.tensor.expressions.base.moveaxis', 'moveaxis', (['x', '(-1)', '(0)'], {}), '(x, -1, 0)\n', (4342, 4352), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((4476, 4505), 'mars.tensor.expressions.base.moveaxis', 'moveaxis', (['x', '[0, 1]', '[-1, -2]'], {}), '(x, [0, 1], [-1, -2])\n', (4484, 4505), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, 
repeat, tile, isin\n'), ((4629, 4665), 'mars.tensor.expressions.base.moveaxis', 'moveaxis', (['x', '[0, 1, 2]', '[-1, -2, -3]'], {}), '(x, [0, 1, 2], [-1, -2, -3])\n', (4637, 4665), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((4831, 4859), 'numpy.random.random', 'np.random.random', (['(10, 5, 1)'], {}), '((10, 5, 1))\n', (4847, 4859), True, 'import numpy as np\n'), ((4874, 4895), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw'], {'chunks': '(2)'}), '(raw, chunks=2)\n', (4880, 4895), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((4911, 4943), 'mars.tensor.expressions.base.broadcast_to', 'broadcast_to', (['arr', '(5, 10, 5, 6)'], {}), '(arr, (5, 10, 5, 6))\n', (4923, 4943), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((5180, 5204), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(1)'}), '(x_data, chunks=1)\n', (5186, 5204), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5250, 5274), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['y_data'], {'chunks': '(2)'}), '(y_data, chunks=2)\n', (5256, 5274), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5288, 5310), 'mars.tensor.expressions.base.broadcast_arrays', 'broadcast_arrays', (['x', 'y'], {}), '(x, y)\n', (5304, 5310), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((5410, 5445), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['x_data', 'y_data'], {}), '(x_data, y_data)\n', (5429, 5445), True, 'import numpy as np\n'), ((5583, 5630), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(4, 4)', 'dtype': '"""?"""'}), "(0, 2, size=(4, 4), dtype='?')\n", (5600, 5630), True, 'import numpy as np\n'), ((5647, 5667), 'numpy.random.rand', 'np.random.rand', (['(4)', '(1)'], {}), '(4, 1)\n', (5661, 5667), True, 'import numpy as np\n'), ((5684, 5704), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (5698, 5704), True, 'import numpy as np\n'), ((5819, 5836), 'mars.tensor.expressions.base.where', 'where', (['cond', 'x', 'y'], {}), '(cond, x, y)\n', (5824, 5836), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((6080, 6109), 'scipy.sparse.random', 'sps.random', (['(4)', '(1)'], {'density': '(0.1)'}), '(4, 1, density=0.1)\n', (6090, 6109), True, 'import 
scipy.sparse as sps\n'), ((6125, 6154), 'scipy.sparse.random', 'sps.random', (['(4)', '(4)'], {'density': '(0.1)'}), '(4, 4, density=0.1)\n', (6135, 6154), True, 'import scipy.sparse as sps\n'), ((6268, 6285), 'mars.tensor.expressions.base.where', 'where', (['cond', 'x', 'y'], {}), '(cond, x, y)\n', (6273, 6285), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((6564, 6590), 'numpy.random.rand', 'np.random.rand', (['(10)', '(20)', '(30)'], {}), '(10, 20, 30)\n', (6578, 6590), True, 'import numpy as np\n'), ((6603, 6629), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_data'], {'chunks': '(6)'}), '(raw_data, chunks=6)\n', (6609, 6629), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((7292, 7319), 'numpy.random.rand', 'np.random.rand', (['(30)', '(100)', '(20)'], {}), '(30, 100, 20)\n', (7306, 7319), True, 'import numpy as np\n'), ((7332, 7358), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_data'], {'chunks': '(6)'}), '(raw_data, chunks=6)\n', (7338, 7358), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((7953, 7979), 'numpy.random.rand', 'np.random.rand', (['(10)', '(20)', '(30)'], {}), '(10, 20, 30)\n', (7967, 7979), True, 'import numpy as np\n'), ((7992, 8018), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_data'], {'chunks': '(6)'}), '(raw_data, chunks=6)\n', (7998, 8018), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((8032, 8049), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(1)'], {}), '(x, 1)\n', (8043, 8049), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((8200, 8217), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (8211, 8217), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((8368, 8385), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(3)'], {}), '(x, 3)\n', (8379, 8385), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((8536, 8554), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(-1)'], {}), '(x, -1)\n', (8547, 8554), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, 
roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((8706, 8724), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(-4)'], {}), '(x, -4)\n', (8717, 8724), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9068, 9096), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3, 4, 5, 6)'], {'chunks': '(1)'}), '((3, 4, 5, 6), chunks=1)\n', (9072, 9096), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9109, 9126), 'mars.tensor.expressions.base.rollaxis', 'rollaxis', (['x', '(3)', '(1)'], {}), '(x, 3, 1)\n', (9117, 9126), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9342, 9359), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3)'], {'chunks': '(2)'}), '(3, chunks=2)\n', (9346, 9359), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9372, 9394), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3, 4)'], {'chunks': '(2)'}), '((3, 4), chunks=2)\n', (9376, 9394), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9408, 9427), 'mars.tensor.expressions.base.atleast_1d', 'atleast_1d', (['x', 'y', 'z'], {}), '(x, y, z)\n', (9418, 9427), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9758, 9775), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3)'], {'chunks': '(2)'}), '(3, chunks=2)\n', (9762, 9775), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9788, 9810), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3, 4)'], {'chunks': '(2)'}), '((3, 4), chunks=2)\n', (9792, 9810), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9824, 9843), 'mars.tensor.expressions.base.atleast_2d', 'atleast_2d', (['x', 'y', 'z'], {}), '(x, y, z)\n', (9834, 9843), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10191, 10208), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3)'], {'chunks': '(2)'}), '(3, chunks=2)\n', (10195, 10208), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10221, 10243), 'mars.tensor.expressions.datasource.ones', 'ones', (['(3, 4)'], {'chunks': '(2)'}), '((3, 4), chunks=2)\n', (10225, 10243), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, 
arange\n'), ((10257, 10276), 'mars.tensor.expressions.base.atleast_3d', 'atleast_3d', (['x', 'y', 'z'], {}), '(x, y, z)\n', (10267, 10276), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10671, 10686), 'mars.tensor.expressions.base.argwhere', 'argwhere', (['(x > 1)'], {}), '(x > 1)\n', (10679, 10686), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10972, 10997), 'mars.tensor.expressions.base.array_split', 'array_split', (['x', '(3)'], {'axis': '(2)'}), '(x, 3, axis=2)\n', (10983, 10997), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((11287, 11324), 'mars.tensor.expressions.base.array_split', 'array_split', (['x', '[3, 5, 6, 10]'], {'axis': '(2)'}), '(x, [3, 5, 6, 10], axis=2)\n', (11298, 11324), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((11710, 11729), 'mars.tensor.expressions.base.split', 'split', (['x', '(4)'], {'axis': '(2)'}), '(x, 4, axis=2)\n', (11715, 11729), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((12013, 12044), 'mars.tensor.expressions.base.split', 'split', (['x', '[3, 5, 6, 10]'], {'axis': '(2)'}), '(x, [3, 5, 6, 10], axis=2)\n', (12018, 12044), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((12409, 12421), 'mars.tensor.expressions.base.hsplit', 'hsplit', (['x', '(4)'], {}), '(x, 4)\n', (12415, 12421), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((12767, 12779), 'mars.tensor.expressions.base.vsplit', 'vsplit', (['x', '(4)'], {}), '(x, 4)\n', (12773, 12779), False, 'from mars.tensor.expressions.base import 
copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((13123, 13135), 'mars.tensor.expressions.base.dsplit', 'dsplit', (['x', '(4)'], {}), '(x, 4)\n', (13129, 13135), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((13416, 13446), 'scipy.sparse.random', 'sps.random', (['(12)', '(8)'], {'density': '(0.1)'}), '(12, 8, density=0.1)\n', (13426, 13446), True, 'import scipy.sparse as sps\n'), ((13458, 13482), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(3)'}), '(x_data, chunks=3)\n', (13464, 13482), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((13496, 13515), 'mars.tensor.expressions.base.split', 'split', (['x', '(4)'], {'axis': '(0)'}), '(x, 4, axis=0)\n', (13501, 13515), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((13827, 13847), 'mars.tensor.expressions.datasource.arange', 'arange', (['(10)'], {'chunks': '(2)'}), '(10, chunks=2)\n', (13833, 13847), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((13861, 13871), 'mars.tensor.expressions.base.roll', 'roll', (['x', '(2)'], {}), '(x, 2)\n', (13865, 13871), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((13988, 14026), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14011, 14026), True, 'import numpy as np\n'), ((14070, 14081), 'mars.tensor.expressions.base.roll', 'roll', (['x2', '(1)'], {}), '(x2, 1)\n', (14074, 14081), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((14212, 14250), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14235, 14250), True, 'import numpy as np\n'), ((14264, 14283), 'mars.tensor.expressions.base.roll', 'roll', (['x2', '(1)'], {'axis': '(0)'}), '(x2, 1, axis=0)\n', (14268, 14283), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, 
flip, flipud, fliplr, repeat, tile, isin\n'), ((14422, 14460), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14445, 14460), True, 'import numpy as np\n'), ((14474, 14493), 'mars.tensor.expressions.base.roll', 'roll', (['x2', '(1)'], {'axis': '(1)'}), '(x2, 1, axis=1)\n', (14478, 14493), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((14632, 14670), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14655, 14670), True, 'import numpy as np\n'), ((14723, 14750), 'numpy.array', 'np.array', (['[[[0], [1], [2]]]'], {}), '([[[0], [1], [2]]])\n', (14731, 14750), True, 'import numpy as np\n'), ((14763, 14785), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(1)'}), '(data, chunks=1)\n', (14769, 14785), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((14799, 14809), 'mars.tensor.expressions.base.squeeze', 'squeeze', (['x'], {}), '(x)\n', (14806, 14809), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((14892, 14908), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (14902, 14908), True, 'import numpy as np\n'), ((14917, 14955), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14940, 14955), True, 'import numpy as np\n'), ((14969, 14987), 'mars.tensor.expressions.base.squeeze', 'squeeze', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (14976, 14987), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((15070, 15094), 'numpy.squeeze', 'np.squeeze', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (15080, 15094), True, 'import numpy as np\n'), ((15103, 15141), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (15126, 15141), True, 'import numpy as np\n'), ((15234, 15248), 'mars.tensor.expressions.base.ptp', 'ptp', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (15237, 15248), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((15382, 15420), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (15405, 15420), True, 'import numpy as np\n'), ((15434, 15448), 'mars.tensor.expressions.base.ptp', 'ptp', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (15437, 15448), False, 'from 
mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((15582, 15620), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (15605, 15620), True, 'import numpy as np\n'), ((15634, 15640), 'mars.tensor.expressions.base.ptp', 'ptp', (['x'], {}), '(x)\n', (15637, 15640), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((15753, 15791), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (15776, 15791), True, 'import numpy as np\n'), ((15841, 15866), 'numpy.array', 'np.array', (['[1, 2, 4, 7, 0]'], {}), '([1, 2, 4, 7, 0])\n', (15849, 15866), True, 'import numpy as np\n'), ((15879, 15901), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (15885, 15901), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((15915, 15922), 'mars.tensor.expressions.base.diff', 'diff', (['x'], {}), '(x)\n', (15919, 15922), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((16005, 16018), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (16012, 16018), True, 'import numpy as np\n'), ((16027, 16065), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (16050, 16065), True, 'import numpy as np\n'), ((16079, 16091), 'mars.tensor.expressions.base.diff', 'diff', (['x'], {'n': '(2)'}), '(x, n=2)\n', (16083, 16091), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((16174, 16192), 'numpy.diff', 'np.diff', (['data'], {'n': '(2)'}), '(data, n=2)\n', (16181, 16192), True, 'import numpy as np\n'), ((16201, 16239), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (16224, 16239), True, 'import numpy as np\n'), ((16256, 16295), 'numpy.array', 'np.array', (['[[1, 3, 6, 10], [0, 5, 6, 8]]'], {}), '([[1, 3, 6, 10], [0, 5, 6, 8]])\n', (16264, 16295), True, 'import numpy as np\n'), ((16308, 16330), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (16314, 16330), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((16344, 16351), 'mars.tensor.expressions.base.diff', 'diff', (['x'], {}), '(x)\n', (16348, 16351), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, 
broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((16434, 16447), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (16441, 16447), True, 'import numpy as np\n'), ((16456, 16494), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (16479, 16494), True, 'import numpy as np\n'), ((16508, 16523), 'mars.tensor.expressions.base.diff', 'diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (16512, 16523), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((16606, 16627), 'numpy.diff', 'np.diff', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (16613, 16627), True, 'import numpy as np\n'), ((16636, 16674), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (16659, 16674), True, 'import numpy as np\n'), ((16688, 16746), 'mars.tensor.arange', 'mt.arange', (['"""1066-10-13"""', '"""1066-10-16"""'], {'dtype': 'mt.datetime64'}), "('1066-10-13', '1066-10-16', dtype=mt.datetime64)\n", (16697, 16746), True, 'from mars import tensor as mt\n'), ((16759, 16766), 'mars.tensor.expressions.base.diff', 'diff', (['x'], {}), '(x)\n', (16763, 16766), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((16925, 16963), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (16948, 16963), True, 'import numpy as np\n'), ((17007, 17032), 'numpy.array', 'np.array', (['[1, 2, 4, 7, 0]'], {}), '([1, 2, 4, 7, 0])\n', (17015, 17032), True, 'import numpy as np\n'), ((17045, 17067), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (17051, 17067), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17081, 17091), 'mars.tensor.expressions.base.ediff1d', 'ediff1d', (['x'], {}), '(x)\n', (17088, 17091), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((17174, 17190), 'numpy.ediff1d', 'np.ediff1d', (['data'], {}), '(data)\n', (17184, 17190), True, 'import numpy as np\n'), ((17199, 17237), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (17222, 17237), True, 'import numpy as np\n'), ((17258, 17279), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['(-99)'], {'chunks': '(2)'}), '(-99, chunks=2)\n', (17264, 17279), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17297, 17323), 
'mars.tensor.expressions.datasource.tensor', 'tensor', (['[88, 99]'], {'chunks': '(2)'}), '([88, 99], chunks=2)\n', (17303, 17323), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17336, 17380), 'mars.tensor.expressions.base.ediff1d', 'ediff1d', (['x'], {'to_begin': 'to_begin', 'to_end': 'to_end'}), '(x, to_begin=to_begin, to_end=to_end)\n', (17343, 17380), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((17529, 17567), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (17552, 17567), True, 'import numpy as np\n'), ((17735, 17751), 'numpy.ediff1d', 'np.ediff1d', (['data'], {}), '(data)\n', (17745, 17751), True, 'import numpy as np\n'), ((17760, 17798), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (17783, 17798), True, 'import numpy as np\n'), ((17852, 17882), 'numpy.array', 'np.array', (['[0.2, 6.4, 3.0, 1.6]'], {}), '([0.2, 6.4, 3.0, 1.6])\n', (17860, 17882), True, 'import numpy as np\n'), ((17895, 17917), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (17901, 17917), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17933, 17969), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.5, 4.0, 10.0]'], {}), '([0.0, 1.0, 2.5, 4.0, 10.0])\n', (17941, 17969), True, 'import numpy as np\n'), ((17985, 18002), 'mars.tensor.expressions.base.digitize', 'digitize', (['x', 'bins'], {}), '(x, bins)\n', (17993, 18002), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((18088, 18111), 'numpy.digitize', 'np.digitize', (['data', 'bins'], {}), '(data, bins)\n', (18099, 18111), True, 'import numpy as np\n'), ((18120, 18158), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (18143, 18158), True, 'import numpy as np\n'), ((18172, 18194), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['bins'], {'chunks': '(2)'}), '(bins, chunks=2)\n', (18178, 18194), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((18210, 18224), 'mars.tensor.expressions.base.digitize', 'digitize', (['x', 'b'], {}), '(x, b)\n', (18218, 18224), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((18310, 18333), 'numpy.digitize', 'np.digitize', (['data', 'bins'], {}), '(data, bins)\n', (18321, 18333), True, 'import numpy as np\n'), ((18342, 18380), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (18365, 18380), True, 'import numpy as np\n'), ((18397, 18436), 
'numpy.array', 'np.array', (['[1.2, 10.0, 12.4, 15.5, 20.0]'], {}), '([1.2, 10.0, 12.4, 15.5, 20.0])\n', (18405, 18436), True, 'import numpy as np\n'), ((18448, 18470), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (18454, 18470), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((18486, 18514), 'numpy.array', 'np.array', (['[0, 5, 10, 15, 20]'], {}), '([0, 5, 10, 15, 20])\n', (18494, 18514), True, 'import numpy as np\n'), ((18530, 18559), 'mars.tensor.expressions.base.digitize', 'digitize', (['x', 'bins'], {'right': '(True)'}), '(x, bins, right=True)\n', (18538, 18559), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((18645, 18680), 'numpy.digitize', 'np.digitize', (['data', 'bins'], {'right': '(True)'}), '(data, bins, right=True)\n', (18656, 18680), True, 'import numpy as np\n'), ((18689, 18727), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (18712, 18727), True, 'import numpy as np\n'), ((18744, 18774), 'mars.tensor.expressions.base.digitize', 'digitize', (['x', 'bins'], {'right': '(False)'}), '(x, bins, right=False)\n', (18752, 18774), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((18860, 18896), 'numpy.digitize', 'np.digitize', (['data', 'bins'], {'right': '(False)'}), '(data, bins, right=False)\n', (18871, 18896), True, 'import numpy as np\n'), ((18905, 18943), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (18928, 18943), True, 'import numpy as np\n'), ((19007, 19029), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (19013, 19029), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19045, 19081), 'numpy.array', 'np.array', (['[1.0, 2.0, 2.5, 4.0, 10.0]'], {}), '([1.0, 2.0, 2.5, 4.0, 10.0])\n', (19053, 19081), True, 'import numpy as np\n'), ((19097, 19114), 'mars.tensor.expressions.base.digitize', 'digitize', (['x', 'bins'], {}), '(x, bins)\n', (19105, 19114), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((19356, 19378), 'mars.tensor.expressions.datasource.arange', 'arange', (['(1)', '(5)'], {'chunks': '(1)'}), '(1, 5, chunks=1)\n', (19362, 19378), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19391, 19404), 'mars.tensor.expressions.base.average', 'average', (['data'], {}), '(data)\n', (19398, 19404), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, 
atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((20070, 20108), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (20093, 20108), True, 'import numpy as np\n'), ((20316, 20338), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(1)'}), '(data, chunks=1)\n', (20322, 20338), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20352, 20358), 'mars.tensor.expressions.base.cov', 'cov', (['x'], {}), '(x)\n', (20355, 20358), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((20441, 20453), 'numpy.cov', 'np.cov', (['data'], {}), '(data)\n', (20447, 20453), True, 'import numpy as np\n'), ((20462, 20500), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (20485, 20500), True, 'import numpy as np\n'), ((20581, 20605), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data_x'], {'chunks': '(1)'}), '(data_x, chunks=1)\n', (20587, 20605), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20618, 20642), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data_y'], {'chunks': '(1)'}), '(data_y, chunks=1)\n', (20624, 20642), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20656, 20677), 'mars.tensor.expressions.merge.stack', 'stack', (['(x, y)'], {'axis': '(0)'}), '((x, y), axis=0)\n', (20661, 20677), False, 'from mars.tensor.expressions.merge import stack\n'), ((20690, 20699), 'mars.tensor.expressions.base.cov', 'cov', (['x', 'y'], {}), '(x, y)\n', (20693, 20699), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((20905, 20929), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data_x'], {'chunks': '(1)'}), '(data_x, chunks=1)\n', (20911, 20929), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20942, 20966), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data_y'], {'chunks': '(1)'}), '(data_y, chunks=1)\n', (20948, 20966), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20980, 20994), 'mars.tensor.expressions.base.corrcoef', 'corrcoef', (['x', 'y'], {}), '(x, y)\n', (20988, 20994), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21077, 21104), 'numpy.corrcoef', 'np.corrcoef', (['data_x', 'data_y'], {}), '(data_x, data_y)\n', (21088, 21104), True, 'import numpy as np\n'), ((21113, 21151), 
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21136, 21151), True, 'import numpy as np\n'), ((21250, 21260), 'mars.tensor.expressions.base.flip', 'flip', (['a', '(0)'], {}), '(a, 0)\n', (21254, 21260), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21393, 21431), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21416, 21431), True, 'import numpy as np\n'), ((21445, 21455), 'mars.tensor.expressions.base.flip', 'flip', (['a', '(1)'], {}), '(a, 1)\n', (21449, 21455), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21588, 21626), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21611, 21626), True, 'import numpy as np\n'), ((21640, 21649), 'mars.tensor.expressions.base.flipud', 'flipud', (['a'], {}), '(a)\n', (21646, 21649), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21781, 21819), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21804, 21819), True, 'import numpy as np\n'), ((21833, 21842), 'mars.tensor.expressions.base.fliplr', 'fliplr', (['a'], {}), '(a)\n', (21839, 21842), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21974, 22012), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21997, 22012), True, 'import numpy as np\n'), ((22061, 22073), 'mars.tensor.expressions.base.repeat', 'repeat', (['(3)', '(4)'], {}), '(3, 4)\n', (22067, 22073), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22143, 22158), 'numpy.repeat', 'np.repeat', (['(3)', '(4)'], {}), '(3, 4)\n', (22152, 22158), True, 'import numpy as np\n'), ((22167, 22205), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22190, 22205), True, 'import numpy as np\n'), ((22224, 22247), 'numpy.random.randn', 'np.random.randn', (['(20)', '(30)'], {}), '(20, 30)\n', (22239, 22247), True, 'import numpy as np\n'), ((22260, 22289), 
'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(3, 4)'}), '(x_data, chunks=(3, 4))\n', (22266, 22289), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((22303, 22315), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(2)'], {}), '(x, 2)\n', (22309, 22315), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22398, 22418), 'numpy.repeat', 'np.repeat', (['x_data', '(2)'], {}), '(x_data, 2)\n', (22407, 22418), True, 'import numpy as np\n'), ((22427, 22465), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22450, 22465), True, 'import numpy as np\n'), ((22479, 22499), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(3)'], {'axis': '(1)'}), '(x, 3, axis=1)\n', (22485, 22499), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22582, 22610), 'numpy.repeat', 'np.repeat', (['x_data', '(3)'], {'axis': '(1)'}), '(x_data, 3, axis=1)\n', (22591, 22610), True, 'import numpy as np\n'), ((22619, 22657), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22642, 22657), True, 'import numpy as np\n'), ((22835, 22873), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22858, 22873), True, 'import numpy as np\n'), ((23058, 23096), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23081, 23096), True, 'import numpy as np\n'), ((23115, 23146), 'scipy.sparse.random', 'sps.random', (['(20)', '(30)'], {'density': '(0.1)'}), '(20, 30, density=0.1)\n', (23125, 23146), True, 'import scipy.sparse as sps\n'), ((23158, 23187), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(3, 4)'}), '(x_data, chunks=(3, 4))\n', (23164, 23187), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23201, 23221), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(2)'], {'axis': '(1)'}), '(x, 2, axis=1)\n', (23207, 23221), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23451, 23470), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (23459, 23470), True, 'import numpy as np\n'), ((23483, 23507), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['a_data'], {'chunks': '(2)'}), '(a_data, chunks=2)\n', (23489, 23507), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23521, 23531), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2)'], {}), '(a, 2)\n', (23525, 23531), False, 'from mars.tensor.expressions.base import 
copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23614, 23632), 'numpy.tile', 'np.tile', (['a_data', '(2)'], {}), '(a_data, 2)\n', (23621, 23632), True, 'import numpy as np\n'), ((23641, 23679), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23664, 23679), True, 'import numpy as np\n'), ((23693, 23708), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2, 2)'], {}), '(a, (2, 2))\n', (23697, 23708), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23791, 23814), 'numpy.tile', 'np.tile', (['a_data', '(2, 2)'], {}), '(a_data, (2, 2))\n', (23798, 23814), True, 'import numpy as np\n'), ((23823, 23861), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23846, 23861), True, 'import numpy as np\n'), ((23875, 23893), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2, 1, 2)'], {}), '(a, (2, 1, 2))\n', (23879, 23893), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23976, 24002), 'numpy.tile', 'np.tile', (['a_data', '(2, 1, 2)'], {}), '(a_data, (2, 1, 2))\n', (23983, 24002), True, 'import numpy as np\n'), ((24011, 24049), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24034, 24049), True, 'import numpy as np\n'), ((24068, 24094), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (24076, 24094), True, 'import numpy as np\n'), ((24107, 24131), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['b_data'], {'chunks': '(1)'}), '(b_data, chunks=1)\n', (24113, 24131), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((24145, 24155), 'mars.tensor.expressions.base.tile', 'tile', (['b', '(2)'], {}), '(b, 2)\n', (24149, 24155), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24238, 24256), 'numpy.tile', 'np.tile', (['b_data', '(2)'], {}), '(b_data, 2)\n', (24245, 24256), True, 'import numpy as np\n'), ((24265, 24303), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24288, 24303), True, 'import numpy as np\n'), ((24317, 24332), 'mars.tensor.expressions.base.tile', 'tile', (['b', '(2, 1)'], {}), '(b, (2, 1))\n', (24321, 24332), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, 
atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24415, 24438), 'numpy.tile', 'np.tile', (['b_data', '(2, 1)'], {}), '(b_data, (2, 1))\n', (24422, 24438), True, 'import numpy as np\n'), ((24447, 24485), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24470, 24485), True, 'import numpy as np\n'), ((24504, 24526), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (24512, 24526), True, 'import numpy as np\n'), ((24539, 24563), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['c_data'], {'chunks': '(3)'}), '(c_data, chunks=3)\n', (24545, 24563), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((24577, 24592), 'mars.tensor.expressions.base.tile', 'tile', (['c', '(4, 1)'], {}), '(c, (4, 1))\n', (24581, 24592), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24675, 24698), 'numpy.tile', 'np.tile', (['c_data', '(4, 1)'], {}), '(c_data, (4, 1))\n', (24682, 24698), True, 'import numpy as np\n'), ((24707, 24745), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24730, 24745), True, 'import numpy as np\n'), ((24891, 24919), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_elements'], {}), '(element, test_elements)\n', (24895, 24919), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25070, 25108), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25093, 25108), True, 'import numpy as np\n'), ((25203, 25219), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (25211, 25219), True, 'import numpy as np\n'), ((25228, 25266), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25251, 25266), True, 'import numpy as np\n'), ((25283, 25324), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_elements'], {'invert': '(True)'}), '(element, test_elements, invert=True)\n', (25287, 25324), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25488, 25526), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25511, 25526), True, 'import numpy as np\n'), ((25621, 25637), 'numpy.array', 'np.array', (['[0, 6]'], {}), '([0, 6])\n', (25629, 25637), True, 'import numpy as np\n'), ((25646, 25684), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25669, 25684), True, 'import 
numpy as np\n'), ((25733, 25756), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_set'], {}), '(element, test_set)\n', (25737, 25756), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25902, 25940), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25925, 25940), True, 'import numpy as np\n'), ((1630, 1665), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw[:4, :4]'], {}), '(res[0], raw[:4, :4])\n', (1644, 1665), True, 'import numpy as np\n'), ((1691, 1726), 'numpy.array_equal', 'np.array_equal', (['res[1]', 'raw[:4, 4:]'], {}), '(res[1], raw[:4, 4:])\n', (1705, 1726), True, 'import numpy as np\n'), ((1752, 1788), 'numpy.array_equal', 'np.array_equal', (['res[2]', 'raw[4:8, :4]'], {}), '(res[2], raw[4:8, :4])\n', (1766, 1788), True, 'import numpy as np\n'), ((1814, 1850), 'numpy.array_equal', 'np.array_equal', (['res[3]', 'raw[4:8, 4:]'], {}), '(res[3], raw[4:8, 4:])\n', (1828, 1850), True, 'import numpy as np\n'), ((1876, 1911), 'numpy.array_equal', 'np.array_equal', (['res[4]', 'raw[8:, :4]'], {}), '(res[4], raw[8:, :4])\n', (1890, 1911), True, 'import numpy as np\n'), ((1937, 1972), 'numpy.array_equal', 'np.array_equal', (['res[5]', 'raw[8:, 4:]'], {}), '(res[5], raw[8:, 4:])\n', (1951, 1972), True, 'import numpy as np\n'), ((3055, 3084), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw.T'], {}), '(res[0], raw.T)\n', (3069, 3084), True, 'import numpy as np\n'), ((5499, 5528), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (5522, 5528), True, 'import numpy as np\n'), ((5727, 5753), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (5733, 5753), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5755, 5778), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (5761, 5778), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5780, 5803), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (5786, 5803), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6015, 6062), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(4, 4)', 'dtype': '"""?"""'}), "(0, 2, size=(4, 4), dtype='?')\n", (6032, 6062), True, 'import numpy as np\n'), ((6176, 6202), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (6182, 6202), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6204, 6227), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (6210, 6227), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6229, 6252), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (6235, 6252), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((8922, 8940), 'mars.tensor.expressions.base.expand_dims', 
'expand_dims', (['x', '(-5)'], {}), '(x, -5)\n', (8933, 8940), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9000, 9017), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(4)'], {}), '(x, 4)\n', (9011, 9017), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10838, 10867), 'numpy.array_equal', 'np.array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10852, 10867), True, 'import numpy as np\n'), ((11211, 11240), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11234, 11240), True, 'import numpy as np\n'), ((11550, 11579), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11573, 11579), True, 'import numpy as np\n'), ((11937, 11966), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11960, 11966), True, 'import numpy as np\n'), ((12264, 12293), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12287, 12293), True, 'import numpy as np\n'), ((12624, 12653), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12647, 12653), True, 'import numpy as np\n'), ((12980, 13009), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13003, 13009), True, 'import numpy as np\n'), ((13336, 13365), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13359, 13365), True, 'import numpy as np\n'), ((13962, 13975), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13971, 13975), True, 'import numpy as np\n'), ((16857, 16915), 'numpy.arange', 'np.arange', (['"""1066-10-13"""', '"""1066-10-16"""'], {'dtype': 'np.datetime64'}), "('1066-10-13', '1066-10-16', dtype=np.datetime64)\n", (16866, 16915), True, 'import numpy as np\n'), ((17629, 17651), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (17635, 17651), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((18960, 18990), 'scipy.sparse.random', 'sps.random', (['(10)', '(1)'], {'density': '(0.1)'}), '(10, 1, density=0.1)\n', (18970, 18990), True, 'import scipy.sparse as sps\n'), ((19485, 19500), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (19494, 19500), True, 'import numpy as np\n'), ((19563, 19586), 'mars.tensor.expressions.datasource.arange', 'arange', (['(1)', '(11)'], {'chunks': '(2)'}), '(1, 11, chunks=2)\n', (19569, 19586), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20267, 20301), 'numpy.array', 'np.array', (['[[0, 2], [1, 1], [2, 0]]'], {}), '([[0, 2], [1, 1], [2, 0]])\n', (20275, 20301), True, 'import numpy as np\n'), ((22681, 22694), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22690, 22694), True, 'import numpy as np\n'), ((22804, 22817), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22813, 22817), 
True, 'import numpy as np\n'), ((22897, 22917), 'mars.tensor.expressions.datasource.arange', 'arange', (['(20)'], {'chunks': '(5)'}), '(20, chunks=5)\n', (22903, 22917), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23027, 23040), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (23036, 23040), True, 'import numpy as np\n'), ((5055, 5090), 'numpy.broadcast_to', 'np.broadcast_to', (['raw', '(5, 10, 5, 6)'], {}), '(raw, (5, 10, 5, 6))\n', (5070, 5090), True, 'import numpy as np\n'), ((5945, 5977), 'numpy.where', 'np.where', (['raw_cond', 'raw_x', 'raw_y'], {}), '(raw_cond, raw_x, raw_y)\n', (5953, 5977), True, 'import numpy as np\n'), ((8157, 8184), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(1)'], {}), '(raw_data, 1)\n', (8171, 8184), True, 'import numpy as np\n'), ((8325, 8352), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(0)'], {}), '(raw_data, 0)\n', (8339, 8352), True, 'import numpy as np\n'), ((8493, 8520), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(3)'], {}), '(raw_data, 3)\n', (8507, 8520), True, 'import numpy as np\n'), ((8662, 8690), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-1)'], {}), '(raw_data, -1)\n', (8676, 8690), True, 'import numpy as np\n'), ((8832, 8860), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-4)'], {}), '(raw_data, -4)\n', (8846, 8860), True, 'import numpy as np\n'), ((9552, 9565), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (9560, 9565), True, 'import numpy as np\n'), ((9615, 9625), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (9622, 9625), True, 'import numpy as np\n'), ((9675, 9690), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (9682, 9690), True, 'import numpy as np\n'), ((9968, 9983), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (9976, 9983), True, 'import numpy as np\n'), ((10108, 10123), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10115, 10123), True, 'import numpy as np\n'), ((10401, 10417), 'numpy.atleast_3d', 'np.atleast_3d', (['x'], {}), '(x)\n', (10414, 10417), True, 'import numpy as np\n'), ((10625, 10644), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (10631, 10644), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10921, 10941), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (10927, 10941), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((11659, 11679), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (11665, 11679), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12356, 12377), 'mars.tensor.expressions.datasource.arange', 'arange', (['(120)'], {'chunks': '(3)'}), '(120, chunks=3)\n', (12362, 12377), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12716, 12736), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (12722, 12736), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((13072, 13092), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (13078, 13092), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((15187, 15206), 'mars.tensor.expressions.datasource.arange', 'arange', 
(['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (15193, 15206), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17501, 17519), 'numpy.array', 'np.array', (['[88, 99]'], {}), '([88, 99])\n', (17509, 17519), True, 'import numpy as np\n'), ((19596, 19623), 'mars.tensor.expressions.datasource.arange', 'arange', (['(10)', '(0)', '(-1)'], {'chunks': '(2)'}), '(10, 0, -1, chunks=2)\n', (19602, 19623), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19801, 19820), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (19807, 19820), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19879, 19915), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (19885, 19915), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20722, 20728), 'mars.tensor.expressions.base.cov', 'cov', (['X'], {}), '(X)\n', (20725, 20728), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21198, 21217), 'mars.tensor.expressions.datasource.arange', 'arange', (['(8)'], {'chunks': '(2)'}), '(8, chunks=2)\n', (21204, 21217), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9246, 9267), 'numpy.ones', 'np.ones', (['(3, 4, 5, 6)'], {}), '((3, 4, 5, 6))\n', (9253, 9267), True, 'import numpy as np\n'), ((10047, 10057), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10054, 10057), True, 'import numpy as np\n'), ((10481, 10491), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10488, 10491), True, 'import numpy as np\n'), ((10556, 10571), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10563, 10571), True, 'import numpy as np\n'), ((11109, 11122), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11118, 11122), True, 'import numpy as np\n'), ((11436, 11449), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11445, 11449), True, 'import numpy as np\n'), ((11835, 11848), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11844, 11848), True, 'import numpy as np\n'), ((12150, 12163), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12159, 12163), True, 'import numpy as np\n'), ((12528, 12542), 'numpy.arange', 'np.arange', (['(120)'], {}), '(120)\n', (12537, 12542), True, 'import numpy as np\n'), ((12886, 12899), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12895, 12899), True, 'import numpy as np\n'), ((13242, 13255), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (13251, 13255), True, 'import numpy as np\n'), ((14172, 14185), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14181, 14185), True, 'import numpy as np\n'), ((14374, 14387), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14383, 14387), True, 'import numpy as np\n'), ((14584, 14597), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14593, 14597), True, 'import numpy as np\n'), ((15338, 15350), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15347, 15350), True, 'import numpy as np\n'), ((15538, 15550), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15547, 15550), 
True, 'import numpy as np\n'), ((15717, 15729), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15726, 15729), True, 'import numpy as np\n'), ((20004, 20016), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (20013, 20016), True, 'import numpy as np\n'), ((20187, 20223), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (20193, 20223), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((21351, 21363), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21360, 21363), True, 'import numpy as np\n'), ((21546, 21558), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21555, 21558), True, 'import numpy as np\n'), ((21742, 21754), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21751, 21754), True, 'import numpy as np\n'), ((21935, 21947), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21944, 21947), True, 'import numpy as np\n'), ((24802, 24821), 'mars.tensor.expressions.datasource.arange', 'arange', (['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (24808, 24821), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10781, 10793), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (10790, 10793), True, 'import numpy as np\n'), ((25017, 25029), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25026, 25029), True, 'import numpy as np\n'), ((25422, 25434), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25431, 25434), True, 'import numpy as np\n'), ((25854, 25866), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25863, 25866), True, 'import numpy as np\n')] |
import sys
import matplotlib
matplotlib.use('Agg')
sys.path.insert(0, 'lib')
| [
"matplotlib.use",
"sys.path.insert"
]
| [((29, 50), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (43, 50), False, 'import matplotlib\n'), ((51, 76), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""lib"""'], {}), "(0, 'lib')\n", (66, 76), False, 'import sys\n')] |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from scipy.interpolate import splrep, splev  # needed for the 'spline' fitting option below
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
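    """
    Sum Gaussian PDFs (defined by their means and standard deviations) on a common grid of
    250 points over the given abundance range; optionally normalise the summed curve so its
    peak value equals 1.
    """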
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
def _prepare_hist_data(d, bins, range, norm=True):
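    """
    Histogram the data over the given range and return the left bin edges, the bin heights
    (peak-normalised when requested) and the bin width.
    """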
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
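    """
    Flag points that deviate from the fitted trend by more than sigma_low/sigma_high times
    the current scatter and remove them from the selection mask used in the next fit step.
    """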
    # difference to the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
    idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
                                diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
                    order=5, window=10, n_min_perc=10., func='poly'):
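    """
    Iteratively fit a trend of abundance (a_data) versus a stellar parameter (p_data) with the
    selected function ('poly', 'cheb', 'legen' or 'spline'), sigma-clipping outliers between
    steps; stops early if too few points remain or the selection no longer changes.
    Returns the fit description ([coefficients, parameter offset]) and the residual scatter.
    """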
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
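    """
    Evaluate a trend model produced by fit_abund_trend (coefficients and parameter offset)
    at the given parameter values, using the same functional form.
    """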
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members after removing overlap from tails:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
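    # non-DR3 catalogues are not handled here; cannon_data and the column names stay undefined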
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
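        # for every selected stellar parameter, plot the detrended abundances of the field,
        # initial, ejected and tail stars as a function of that parameter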
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
                ax[-1, -3].legend(handles=[sl1, sl2, sl3, sl5])
else:
                ax[-1, -3].legend(handles=[sl1, sl2, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
| [
"numpy.polyfit",
"numpy.array",
"numpy.isfinite",
"numpy.poly1d",
"numpy.histogram",
"numpy.polynomial.chebyshev.chebval",
"numpy.polynomial.legendre.legfit",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.nanmax",
"glob.glob",
"numpy.abs",
"getopt.getopt",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"numpy.in1d",
"scipy.stats.norm.pdf",
"numpy.nansum",
"warnings.filterwarnings",
"numpy.polynomial.chebyshev.chebfit",
"astropy.table.Table.read",
"matplotlib.pyplot.subplots_adjust",
"numpy.polynomial.legendre.legval",
"numpy.logical_and",
"numpy.nanmedian",
"numpy.logical_or",
"os.chdir",
"numpy.sum",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
]
| [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((323, 356), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (346, 356), False, 'import warnings\n'), ((4237, 4302), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')"], {}), "(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')\n", (4247, 4302), False, 'from astropy.table import Table, join\n'), ((4314, 4390), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')"], {}), "(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')\n", (4324, 4390), False, 'from astropy.table import Table, join\n'), ((4513, 4580), 'numpy.in1d', 'np.in1d', (["tails_data['source_id']", "CG_data['source_id']"], {'invert': '(True)'}), "(tails_data['source_id'], CG_data['source_id'], invert=True)\n", (4520, 4580), True, 'import numpy as np\n'), ((5027, 5051), 'os.chdir', 'chdir', (['data_dir_clusters'], {}), '(data_dir_clusters)\n', (5032, 5051), False, 'from os import chdir, system\n'), ((5071, 5103), 'glob.glob', 'glob', (['"""Cluster_orbits_GaiaDR2_*"""'], {}), "('Cluster_orbits_GaiaDR2_*')\n", (5075, 5103), False, 'from glob import glob\n'), ((426, 462), 'numpy.linspace', 'np.linspace', (['range[0]', 'range[1]', '(250)'], {}), '(range[0], range[1], 250)\n', (437, 462), True, 'import numpy as np\n'), ((476, 497), 'numpy.zeros_like', 'np.zeros_like', (['x_vals'], {}), '(x_vals)\n', (489, 497), True, 'import numpy as np\n'), ((929, 968), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins', 'range': 'range'}), '(d, bins=bins, range=range)\n', (941, 968), True, 'import numpy as np\n'), ((981, 1008), 'numpy.abs', 'np.abs', (['(edges[0] - edges[1])'], {}), '(edges[0] - edges[1])\n', (987, 1008), True, 'import numpy as np\n'), ((1255, 1275), 'numpy.nanstd', 'np.nanstd', (['diff[idx]'], {}), '(diff[idx])\n', (1264, 1275), True, 'import numpy as np\n'), ((1332, 1411), 'numpy.logical_or', 'np.logical_or', (['(diff < -1.0 * std_diff * sigma_low)', '(diff > std_diff * sigma_high)'], {}), '(diff < -1.0 * std_diff * sigma_low, diff > std_diff * sigma_high)\n', (1345, 1411), True, 'import numpy as np\n'), ((1458, 1491), 'numpy.logical_and', 'np.logical_and', (['idx', '(~idx_outlier)'], {}), '(idx, ~idx_outlier)\n', (1472, 1491), True, 'import numpy as np\n'), ((1746, 1761), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1752, 1761), True, 'import numpy as np\n'), ((1787, 1802), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1793, 1802), True, 'import numpy as np\n'), ((1874, 1894), 'numpy.nanmedian', 'np.nanmedian', (['p_data'], {}), '(p_data)\n', (1886, 1894), True, 'import numpy as np\n'), ((3046, 3072), 'numpy.nanstd', 'np.nanstd', (['(a_data - f_data)'], {}), '(a_data - f_data)\n', (3055, 3072), True, 'import numpy as np\n'), ((3829, 3895), 'getopt.getopt', 'getopt', (['argv[1:]', '""""""', "['dr3=', 'suffix=', 'flags=', 'individual=']"], {}), "(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])\n", (3835, 3895), False, 'from getopt import getopt\n'), ((4794, 4846), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'GALAH_iDR3_main_191213.fits')"], {}), "(data_dir + 'GALAH_iDR3_main_191213.fits')\n", (4804, 4846), False, 'from astropy.table import Table, join\n'), ((5109, 5127), 'os.chdir', 'chdir', (['cluster_dir'], {}), '(cluster_dir)\n', (5114, 5127), False, 'from os import chdir, 
system\n'), ((5199, 5208), 'glob.glob', 'glob', (['"""*"""'], {}), "('*')\n", (5203, 5208), False, 'from glob import glob\n'), ((14816, 14827), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (14821, 14827), False, 'from os import chdir, system\n'), ((1689, 1708), 'numpy.isfinite', 'np.isfinite', (['p_data'], {}), '(p_data)\n', (1700, 1708), True, 'import numpy as np\n'), ((1710, 1729), 'numpy.isfinite', 'np.isfinite', (['a_data'], {}), '(a_data)\n', (1721, 1729), True, 'import numpy as np\n'), ((2824, 2839), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (2830, 2839), True, 'import numpy as np\n'), ((3230, 3286), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3261, 3286), True, 'import numpy as np\n'), ((3328, 3382), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3357, 3382), True, 'import numpy as np\n'), ((5740, 5754), 'os.chdir', 'chdir', (['sub_dir'], {}), '(sub_dir)\n', (5745, 5754), False, 'from os import chdir, system\n'), ((7540, 7551), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (7545, 7551), False, 'from os import chdir, system\n'), ((651, 693), 'scipy.stats.norm.pdf', 'gauss_norm.pdf', (['x_vals'], {'loc': 'd_m', 'scale': 'd_s'}), '(x_vals, loc=d_m, scale=d_s)\n', (665, 693), True, 'from scipy.stats import norm as gauss_norm\n'), ((758, 775), 'numpy.nansum', 'np.nansum', (['y_vals'], {}), '(y_vals)\n', (767, 775), True, 'import numpy as np\n'), ((811, 828), 'numpy.nanmax', 'np.nanmax', (['y_vals'], {}), '(y_vals)\n', (820, 828), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.nanmax', 'np.nanmax', (['heights'], {}), '(heights)\n', (1062, 1071), True, 'import numpy as np\n'), ((2005, 2092), 'numpy.polynomial.chebyshev.chebfit', 'np.polynomial.chebyshev.chebfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2036, 2092), True, 'import numpy as np\n'), ((2110, 2166), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2141, 2166), True, 'import numpy as np\n'), ((2214, 2299), 'numpy.polynomial.legendre.legfit', 'np.polynomial.legendre.legfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2243, 2299), True, 'import numpy as np\n'), ((2317, 2371), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2346, 2371), True, 'import numpy as np\n'), ((2418, 2480), 'numpy.polyfit', 'np.polyfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit], order)\n', (2428, 2480), True, 'import numpy as np\n'), ((3423, 3438), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (3432, 3438), True, 'import numpy as np\n'), ((5501, 5523), 'numpy.array', 'np.array', (['source_id_cg'], {}), '(source_id_cg)\n', (5509, 5523), True, 'import numpy as np\n'), ((5578, 5602), 'numpy.array', 'np.array', (['source_id_tail'], {}), '(source_id_tail)\n', (5586, 5602), True, 'import numpy as np\n'), ((5790, 5858), 'astropy.table.Table.read', 'Table.read', (['"""members_init_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('members_init_galah.csv', format='ascii', 
delimiter='\\t')\n", (5800, 5858), False, 'from astropy.table import Table, join\n'), ((5882, 5936), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_init['source_id']"], {}), "(cannon_data['source_id'], g_init['source_id'])\n", (5889, 5936), True, 'import numpy as np\n'), ((6046, 6118), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1.csv', format='ascii', delimiter='\\t')\n", (6056, 6118), False, 'from astropy.table import Table, join\n'), ((6138, 6216), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1_galah.csv', format='ascii', delimiter='\\t')\n", (6148, 6216), False, 'from astropy.table import Table, join\n'), ((6715, 6767), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_in['source_id']"], {}), "(cannon_data['source_id'], g_in['source_id'])\n", (6722, 6767), True, 'import numpy as np\n'), ((7097, 7175), 'astropy.table.Table.read', 'Table.read', (['"""possible_outside-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_outside-step1_galah.csv', format='ascii', delimiter='\\t')\n", (7107, 7175), False, 'from astropy.table import Table, join\n'), ((7406, 7459), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_out['source_id']"], {}), "(cannon_data['source_id'], g_out['source_id'])\n", (7413, 7459), True, 'import numpy as np\n'), ((9017, 9071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['y_cols_fig', 'x_cols_fig'], {'figsize': '(15, 10)'}), '(y_cols_fig, x_cols_fig, figsize=(15, 10))\n', (9029, 9071), True, 'import matplotlib.pyplot as plt\n'), ((11756, 11790), 'numpy.isfinite', 'np.isfinite', (['cannon_data[teff_col]'], {}), '(cannon_data[teff_col])\n', (11767, 11790), True, 'import numpy as np\n'), ((11956, 11988), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (11970, 11988), True, 'import numpy as np\n'), ((12010, 12043), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (12024, 12043), True, 'import numpy as np\n'), ((12065, 12096), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (12079, 12096), True, 'import numpy as np\n'), ((12118, 12151), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (12132, 12151), True, 'import numpy as np\n'), ((14564, 14658), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.97)', 'bottom': '(0.02)', 'left': '(0.04)', 'right': '(0.98)', 'hspace': '(0.3)', 'wspace': '(0.3)'}), '(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=\n 0.3, wspace=0.3)\n', (14583, 14658), True, 'import matplotlib.pyplot as plt\n'), ((14691, 14787), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png')"], {'dpi': '(250)'}), "('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix +\n '.png', dpi=250)\n", (14702, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14796, 14810), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14805, 14810), True, 'import matplotlib.pyplot as plt\n'), ((598, 621), 'numpy.isfinite', 'np.isfinite', (['[d_m, d_s]'], {}), '([d_m, d_s])\n', (609, 621), True, 'import numpy as np\n'), ((2502, 2517), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (2511, 
2517), True, 'import numpy as np\n'), ((6312, 6404), 'numpy.logical_and', 'np.logical_and', (["(g_in_all['time_in_cluster'] >= 1.0)", "(g_in_all['in_cluster_prob'] >= 68.0)"], {}), "(g_in_all['time_in_cluster'] >= 1.0, g_in_all[\n 'in_cluster_prob'] >= 68.0)\n", (6326, 6404), True, 'import numpy as np\n'), ((6576, 6655), 'numpy.logical_and', 'np.logical_and', (["(g_in['time_in_cluster'] >= 1.0)", "(g_in['in_cluster_prob'] >= 68.0)"], {}), "(g_in['time_in_cluster'] >= 1.0, g_in['in_cluster_prob'] >= 68.0)\n", (6590, 6655), True, 'import numpy as np\n'), ((7265, 7341), 'numpy.logical_and', 'np.logical_and', (["(g_out['time_in_cluster'] <= 0)", "(g_out['in_cluster_prob'] <= 0)"], {}), "(g_out['time_in_cluster'] <= 0, g_out['in_cluster_prob'] <= 0)\n", (7279, 7341), True, 'import numpy as np\n'), ((7564, 7580), 'numpy.sum', 'np.sum', (['idx_init'], {}), '(idx_init)\n', (7570, 7580), True, 'import numpy as np\n'), ((7589, 7603), 'numpy.sum', 'np.sum', (['idx_in'], {}), '(idx_in)\n', (7595, 7603), True, 'import numpy as np\n'), ((7612, 7627), 'numpy.sum', 'np.sum', (['idx_out'], {}), '(idx_out)\n', (7618, 7627), True, 'import numpy as np\n'), ((9449, 9478), 'numpy.isfinite', 'np.isfinite', (['cannon_data[col]'], {}), '(cannon_data[col])\n', (9460, 9478), True, 'import numpy as np\n'), ((9613, 9645), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (9627, 9645), True, 'import numpy as np\n'), ((9671, 9704), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (9685, 9704), True, 'import numpy as np\n'), ((9730, 9761), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (9744, 9761), True, 'import numpy as np\n'), ((9787, 9823), 'numpy.logical_and', 'np.logical_and', (['idx_cg_memb', 'idx_val'], {}), '(idx_cg_memb, idx_val)\n', (9801, 9823), True, 'import numpy as np\n'), ((9849, 9882), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (9863, 9882), True, 'import numpy as np\n'), ((11841, 11890), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] == 0)\n', (11855, 11890), True, 'import numpy as np\n'), ((12957, 12971), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (12963, 12971), True, 'import numpy as np\n'), ((13352, 13366), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (13358, 13366), True, 'import numpy as np\n'), ((13368, 13382), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (13374, 13382), True, 'import numpy as np\n'), ((13384, 13398), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (13390, 13398), True, 'import numpy as np\n'), ((14097, 14111), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (14103, 14111), True, 'import numpy as np\n'), ((14344, 14358), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (14350, 14358), True, 'import numpy as np\n'), ((14360, 14374), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (14366, 14374), True, 'import numpy as np\n'), ((14376, 14390), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (14382, 14390), True, 'import numpy as np\n'), ((6875, 6930), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "CG_data['source_id']"], {}), "(cannon_data['source_id'], CG_data['source_id'])\n", (6882, 6930), True, 'import numpy as np\n'), ((9537, 9586), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] 
== 0)\n', (9551, 9586), True, 'import numpy as np\n'), ((11052, 11066), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (11058, 11066), True, 'import numpy as np\n'), ((11395, 11409), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (11401, 11409), True, 'import numpy as np\n'), ((11411, 11425), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (11417, 11425), True, 'import numpy as np\n'), ((11427, 11441), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (11433, 11441), True, 'import numpy as np\n'), ((11125, 11155), 'numpy.logical_and', 'np.logical_and', (['idx_u3', 'idx_u5'], {}), '(idx_u3, idx_u5)\n', (11139, 11155), True, 'import numpy as np\n')] |
#===============================================================
# @author: <EMAIL>
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================
# Import section with reference to the entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect
# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data
# ==============================================================
# App Routes/Gateways
# ==============================================================
@application.route('/test', methods=['GET'])
def test():
return '<h4>HELLO WORLD!</h4><hr/> it works!'
@application.route('/', methods=['GET'])
@application.route('/home', methods=['GET'])
@application.route('/dashboard', methods=['GET'])
def root():
return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/history', methods=['GET'])
def history():
return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data)
@application.route('/about', methods=['GET'])
def about():
return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/get-notes', methods=['POST'])
def get_todo():
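    # log the submitted note fields and send the user back to the notes page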
print("KEY :: VALUE (from the received form data)")
print([(key, val) for key, val in zip(request.form.keys(), request.form.values())])
return redirect("/notes", code=302)
@application.route('/notes')
def info():
return render_template("notes.html", app_data=app_ui_config)
@application.route('/sample-data')
def get_sample_data():
return jsonify(app_ui_config)
# ==============================================================
# Error Handlers Starts
# ==============================================================
# 404 Handler; We can also pass specific request error codes to the decorator;
@application.errorhandler(404)
def not_found(err):
return render_template("error.html", app_data=app_ui_config, error_data=err), 400
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(TypeError)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(Exception)
def unhandled_exception(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# ==============================================================
# Error Handlers Ends
# ==============================================================
# Route For Sample data
@application.route('/data')
def get_data():
data = {
"reports": [
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_1",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"},
"total_time": "35 min."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_2",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"},
"total_time": "0.2345 secs."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_3",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"},
"total_time": "5 days"
}
]
}
return jsonify(data)
# ==============================================================
# Extra routes starts
# ==============================================================
@application.route('/sample1')
def sample1():
return render_template("web-analytics-overview.html")
@application.route('/sample2')
def sample2():
return render_template("web-analytics-real-time.html")
@application.route('/logo')
def get_logo():
"""
Queries the snapshot data for both Serenity and JMeter projects from the MongoDB.
Renders the Snapshot view of html
:return: N/A
"""
# set template directory of the Flask App to the path set by the user as command line arg.
    return f'<html><head><title>Root</title></head><body><hr/> Welcome to the main page <hr/> ' \
f'Building image from static public location: <br/> ' \
f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
| [
"flask.render_template",
"flask.request.form.keys",
"__main__.application.route",
"__main__.application.logger.exception",
"flask.url_for",
"flask.redirect",
"flask.request.form.values",
"__main__.application.errorhandler",
"flask.jsonify"
]
| [((659, 702), '__main__.application.route', 'application.route', (['"""/test"""'], {'methods': "['GET']"}), "('/test', methods=['GET'])\n", (676, 702), False, 'from __main__ import application\n'), ((771, 810), '__main__.application.route', 'application.route', (['"""/"""'], {'methods': "['GET']"}), "('/', methods=['GET'])\n", (788, 810), False, 'from __main__ import application\n'), ((813, 856), '__main__.application.route', 'application.route', (['"""/home"""'], {'methods': "['GET']"}), "('/home', methods=['GET'])\n", (830, 856), False, 'from __main__ import application\n'), ((859, 907), '__main__.application.route', 'application.route', (['"""/dashboard"""'], {'methods': "['GET']"}), "('/dashboard', methods=['GET'])\n", (876, 907), False, 'from __main__ import application\n'), ((1027, 1073), '__main__.application.route', 'application.route', (['"""/history"""'], {'methods': "['GET']"}), "('/history', methods=['GET'])\n", (1044, 1073), False, 'from __main__ import application\n'), ((1195, 1239), '__main__.application.route', 'application.route', (['"""/about"""'], {'methods': "['GET']"}), "('/about', methods=['GET'])\n", (1212, 1239), False, 'from __main__ import application\n'), ((1356, 1405), '__main__.application.route', 'application.route', (['"""/get-notes"""'], {'methods': "['POST']"}), "('/get-notes', methods=['POST'])\n", (1373, 1405), False, 'from __main__ import application\n'), ((1616, 1643), '__main__.application.route', 'application.route', (['"""/notes"""'], {}), "('/notes')\n", (1633, 1643), False, 'from __main__ import application\n'), ((1730, 1763), '__main__.application.route', 'application.route', (['"""/sample-data"""'], {}), "('/sample-data')\n", (1747, 1763), False, 'from __main__ import application\n'), ((2075, 2104), '__main__.application.errorhandler', 'application.errorhandler', (['(404)'], {}), '(404)\n', (2099, 2104), False, 'from __main__ import application\n'), ((2302, 2337), '__main__.application.errorhandler', 'application.errorhandler', (['TypeError'], {}), '(TypeError)\n', (2326, 2337), False, 'from __main__ import application\n'), ((2577, 2612), '__main__.application.errorhandler', 'application.errorhandler', (['Exception'], {}), '(Exception)\n', (2601, 2612), False, 'from __main__ import application\n'), ((2947, 2973), '__main__.application.route', 'application.route', (['"""/data"""'], {}), "('/data')\n", (2964, 2973), False, 'from __main__ import application\n'), ((4596, 4625), '__main__.application.route', 'application.route', (['"""/sample1"""'], {}), "('/sample1')\n", (4613, 4625), False, 'from __main__ import application\n'), ((4707, 4736), '__main__.application.route', 'application.route', (['"""/sample2"""'], {}), "('/sample2')\n", (4724, 4736), False, 'from __main__ import application\n'), ((4819, 4845), '__main__.application.route', 'application.route', (['"""/logo"""'], {}), "('/logo')\n", (4836, 4845), False, 'from __main__ import application\n'), ((933, 1025), 'flask.render_template', 'render_template', (['"""dashboard.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.latest_data'}), "('dashboard.html', app_data=app_ui_config, data=sample_data.\n latest_data)\n", (948, 1025), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1102, 1193), 'flask.render_template', 'render_template', (['"""history.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.history_data'}), "('history.html', app_data=app_ui_config, data=sample_data.\n history_data)\n", (1117, 1193), False, 'from flask import 
jsonify, render_template, url_for, request, redirect\n'), ((1266, 1354), 'flask.render_template', 'render_template', (['"""about.html"""'], {'app_data': 'app_ui_config', 'data': 'sample_data.latest_data'}), "('about.html', app_data=app_ui_config, data=sample_data.\n latest_data)\n", (1281, 1354), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1581, 1609), 'flask.redirect', 'redirect', (['"""/notes"""'], {'code': '(302)'}), "('/notes', code=302)\n", (1589, 1609), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1669, 1722), 'flask.render_template', 'render_template', (['"""notes.html"""'], {'app_data': 'app_ui_config'}), "('notes.html', app_data=app_ui_config)\n", (1684, 1722), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1800, 1822), 'flask.jsonify', 'jsonify', (['app_ui_config'], {}), '(app_ui_config)\n', (1807, 1822), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2367, 2400), '__main__.application.logger.exception', 'application.logger.exception', (['err'], {}), '(err)\n', (2395, 2400), False, 'from __main__ import application\n'), ((2642, 2675), '__main__.application.logger.exception', 'application.logger.exception', (['err'], {}), '(err)\n', (2670, 2675), False, 'from __main__ import application\n'), ((4423, 4436), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (4430, 4436), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((4654, 4700), 'flask.render_template', 'render_template', (['"""web-analytics-overview.html"""'], {}), "('web-analytics-overview.html')\n", (4669, 4700), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((4765, 4812), 'flask.render_template', 'render_template', (['"""web-analytics-real-time.html"""'], {}), "('web-analytics-real-time.html')\n", (4780, 4812), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2138, 2207), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2153, 2207), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2413, 2482), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2428, 2482), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((2688, 2757), 'flask.render_template', 'render_template', (['"""error.html"""'], {'app_data': 'app_ui_config', 'error_data': 'err'}), "('error.html', app_data=app_ui_config, error_data=err)\n", (2703, 2757), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((5314, 5359), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': '"""images/logo.svg"""'}), "('static', filename='images/logo.svg')\n", (5321, 5359), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1523, 1542), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (1540, 1542), False, 'from flask import jsonify, render_template, url_for, request, redirect\n'), ((1544, 1565), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (1563, 1565), False, 'from flask import jsonify, render_template, url_for, request, redirect\n')] |