max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M) |
---|---|---|---|---|
tools/manylinux1/build_scripts/ssl-check.py | limeng357/Paddle | 17,085 | 12711193 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cf. https://github.com/pypa/manylinux/issues/53
GOOD_SSL = "https://google.com"
BAD_SSL = "https://self-signed.badssl.com"
import sys
print("Testing SSL certificate checking for Python:", sys.version)
if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
print("This version never checks SSL certs; skipping tests")
sys.exit(0)
if sys.version_info[0] >= 3:
from urllib.request import urlopen
EXC = OSError
else:
from urllib import urlopen
EXC = IOError
print("Connecting to %s should work" % (GOOD_SSL, ))
urlopen(GOOD_SSL)
print("...it did, yay.")
print("Connecting to %s should fail" % (BAD_SSL, ))
try:
urlopen(BAD_SSL)
# If we get here then we failed:
print("...it DIDN'T!!!!!11!!1one!")
sys.exit(1)
except EXC:
print("...it did, yay.")
|
tests/pytests/unit/renderers/test_toml.py | babs/salt | 9,425 | 12711194 |
import pytest
import salt.renderers.tomlmod
import salt.serializers.toml
@pytest.mark.skipif(
salt.serializers.toml.HAS_TOML is False, reason="The 'toml' library is missing"
)
def test_toml_render_string():
data = """[[user-sshkey."ssh_auth.present"]]
user = "username"
[[user-sshkey."ssh_auth.present"]]
config = "%h/.ssh/authorized_keys"
[[user-sshkey."ssh_auth.present"]]
names = [
"hereismykey",
"anotherkey"
]
"""
expected_result = {
"user-sshkey": {
"ssh_auth.present": [
{"user": "username"},
{"config": "%h/.ssh/authorized_keys"},
{"names": ["hereismykey", "anotherkey"]},
]
}
}
result = salt.renderers.tomlmod.render(data)
assert result == expected_result
|
tests/core/utils/test_time.py | cercos/masonite | 1,816 | 12711208 | import pendulum
from tests import TestCase
from src.masonite.utils.time import (
migration_timestamp,
parse_human_time,
cookie_expire_time,
)
class TestTimeUtils(TestCase):
def tearDown(self):
super().tearDown()
self.restoreTime()
def test_parse_human_time_now(self):
ref_time = pendulum.datetime(2021, 1, 1)
self.fakeTime(ref_time)
instance = parse_human_time("now")
self.assertEqual(ref_time, instance)
def test_parse_human_time_expired(self):
self.fakeTime(pendulum.datetime(2021, 1, 1))
instance = parse_human_time("expired")
self.assertEqual(pendulum.datetime(2001, 1, 1), instance)
def test_parse_human_time(self):
self.fakeTime(pendulum.datetime(2021, 1, 1, 12, 0, 0))
self.assertEqual(
pendulum.datetime(2021, 1, 1, 12, 0, 2), parse_human_time("2 seconds")
)
self.assertEqual(
pendulum.datetime(2021, 1, 1, 12, 2, 0), parse_human_time("2 minutes")
)
self.assertEqual(
pendulum.datetime(2021, 1, 1, 14, 0, 0), parse_human_time("2 hour")
)
self.assertEqual(
pendulum.datetime(2021, 1, 2, 12, 0, 0), parse_human_time("1 day")
)
self.assertEqual(
pendulum.datetime(2021, 1, 15, 12, 0, 0), parse_human_time("2 weeks")
)
self.assertEqual(
pendulum.datetime(2021, 4, 1, 12, 0, 0), parse_human_time("3 months")
)
self.assertEqual(
pendulum.datetime(2030, 1, 1, 12, 0, 0), parse_human_time("9 years")
)
self.assertEqual(None, parse_human_time("10 nanoseconds"))
def test_cookie_expire_time(self):
self.fakeTime(pendulum.datetime(2021, 1, 21, 7, 28, 0))
expiration_time_str = cookie_expire_time("7 days")
self.assertEqual(expiration_time_str, "Thu, 28 Jan 2021 07:28:00")
def test_migration_timestamp(self):
self.fakeTime(pendulum.datetime(2021, 10, 25, 8, 12, 54))
self.assertEqual(migration_timestamp(), "2021_10_25_081254")
|
Tools/Scenarios/list_bg.py | ErQing/Nova | 212 | 12711216 | #!/usr/bin/env python3
from luaparser import astnodes
from nova_script_parser import get_node_name, parse_chapters, walk_functions
in_filename = 'scenario.txt'
def do_chapter(entries, bg_list):
for code, _, _ in entries:
if not code:
continue
for func_name, args, _ in walk_functions(code):
if (func_name in [
'show', 'trans', 'trans2', 'trans_fade', 'trans_left',
'trans_right', 'trans_up', 'trans_down'
] and args and get_node_name(args[0]).startswith('bg')
and isinstance(args[1], astnodes.String)):
bg_name = args[1].s
if bg_name not in bg_list:
bg_list.append(bg_name)
elif (func_name == 'show_loop' and args
and get_node_name(args[0]).startswith('bg')):
for field in args[1].fields:
bg_name = field.value.s
if bg_name not in bg_list:
bg_list.append(bg_name)
def main():
with open(in_filename, 'r', encoding='utf-8') as f:
chapters = parse_chapters(f)
bg_list = []
for chapter_name, entries, _, _ in chapters:
print(chapter_name)
do_chapter(entries, bg_list)
print()
for x in bg_list:
print(x)
if __name__ == '__main__':
main()
|
installer/core/providers/aws/boto3/es.py | jonico/pacbot | 1,165 | 12711219 | from core.providers.aws.boto3 import prepare_aws_client_with_given_cred
import boto3
def get_es_client(aws_auth_cred):
"""
Returns the client object for AWS Elasticsearch
Args:
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
obj: AWS Elasticsearch Object
"""
return prepare_aws_client_with_given_cred("es", aws_auth_cred)
def check_es_domain_exists(domain_name, aws_auth_cred):
"""
Check whether the given ES Domain already exists in the AWS Account
Args:
domain_name (str): ES Domain name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
Boolean: True if the domain exists else False
"""
client = get_es_client(aws_auth_cred)
try:
response = client.describe_elasticsearch_domain(
DomainName=domain_name
)
return True if response['DomainStatus'] else False
except:
return False
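# Illustrative usage (a sketch; the key names inside aws_auth_cred are an
# assumption, not taken from this file):
#   creds = {"aws_access_key": "...", "aws_secret_key": "...", "region_name": "..."}
#   if check_es_domain_exists("pacbot-es-domain", creds):
#       print("Domain already exists, skipping creation")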
|
fixtures/tmva_net.py | kgarg8/torchinfo | 736 | 12711242 | # type: ignore
# pylint: skip-file
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConvBlock(nn.Module):
"""(2D conv => BN => LeakyReLU) * 2"""
def __init__(self, in_ch, out_ch, k_size, pad, dil):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=k_size, padding=pad, dilation=dil),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=k_size, padding=pad, dilation=dil),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True),
)
def forward(self, x):
x = self.block(x)
return x
class Double3DConvBlock(nn.Module):
"""(3D conv => BN => LeakyReLU) * 2"""
def __init__(self, in_ch, out_ch, k_size, pad, dil):
super().__init__()
self.block = nn.Sequential(
nn.Conv3d(in_ch, out_ch, kernel_size=k_size, padding=pad, dilation=dil),
nn.BatchNorm3d(out_ch),
nn.LeakyReLU(inplace=True),
nn.Conv3d(out_ch, out_ch, kernel_size=k_size, padding=pad, dilation=dil),
nn.BatchNorm3d(out_ch),
nn.LeakyReLU(inplace=True),
)
def forward(self, x):
x = self.block(x)
return x
class ConvBlock(nn.Module):
"""(2D conv => BN => LeakyReLU)"""
def __init__(self, in_ch, out_ch, k_size, pad, dil):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=k_size, padding=pad, dilation=dil),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(inplace=True),
)
def forward(self, x):
x = self.block(x)
return x
class ASPPBlock(nn.Module):
"""Atrous Spatial Pyramid Pooling
Parallel conv blocks with different dilation rate
"""
def __init__(self, in_ch, out_ch=256):
super().__init__()
self.global_avg_pool = nn.AvgPool2d((64, 64))
self.conv1_1x1 = nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0, dilation=1)
self.single_conv_block1_1x1 = ConvBlock(in_ch, out_ch, k_size=1, pad=0, dil=1)
self.single_conv_block1_3x3 = ConvBlock(in_ch, out_ch, k_size=3, pad=6, dil=6)
self.single_conv_block2_3x3 = ConvBlock(in_ch, out_ch, k_size=3, pad=12, dil=12)
self.single_conv_block3_3x3 = ConvBlock(in_ch, out_ch, k_size=3, pad=18, dil=18)
def forward(self, x):
x1 = F.interpolate(
self.global_avg_pool(x), size=(64, 64), align_corners=False, mode="bilinear"
)
x1 = self.conv1_1x1(x1)
x2 = self.single_conv_block1_1x1(x)
x3 = self.single_conv_block1_3x3(x)
x4 = self.single_conv_block2_3x3(x)
x5 = self.single_conv_block3_3x3(x)
x_cat = torch.cat((x2, x3, x4, x5, x1), 1)
return x_cat
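# Shape sketch (illustrative, based on the 64x64 pooling/upsampling used above):
# for an input x of shape (N, in_ch, 64, 64), each of the five parallel branches
# produces (N, out_ch, 64, 64), so the concatenated output is (N, 5 * out_ch, 64, 64).
# With out_ch=128 this gives the 640 input channels expected by the 1x1
# reduction blocks defined later in this file.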
class EncodingBranch(nn.Module):
"""
Encoding branch for a single radar view
PARAMETERS
----------
signal_type: str
Type of radar view.
Supported: 'range_doppler', 'range_angle' and 'angle_doppler'
"""
def __init__(self, signal_type):
super().__init__()
self.signal_type = signal_type
self.double_3dconv_block1 = Double3DConvBlock(
in_ch=1, out_ch=128, k_size=3, pad=(0, 1, 1), dil=1
)
self.doppler_max_pool = nn.MaxPool2d(2, stride=(2, 1))
self.max_pool = nn.MaxPool2d(2, stride=2)
self.double_conv_block2 = DoubleConvBlock(
in_ch=128, out_ch=128, k_size=3, pad=1, dil=1
)
self.single_conv_block1_1x1 = ConvBlock(
in_ch=128, out_ch=128, k_size=1, pad=0, dil=1
)
def forward(self, x):
x1 = self.double_3dconv_block1(x)
x1 = torch.squeeze(x1, 2) # remove temporal dimension
if self.signal_type in ("range_doppler", "angle_doppler"):
# The Doppler dimension requires a specific processing
x1_pad = F.pad(x1, (0, 1, 0, 0), "constant", 0)
x1_down = self.doppler_max_pool(x1_pad)
else:
x1_down = self.max_pool(x1)
x2 = self.double_conv_block2(x1_down)
if self.signal_type in ("range_doppler", "angle_doppler"):
# The Doppler dimension requires a specific processing
x2_pad = F.pad(x2, (0, 1, 0, 0), "constant", 0)
x2_down = self.doppler_max_pool(x2_pad)
else:
x2_down = self.max_pool(x2)
x3 = self.single_conv_block1_1x1(x2_down)
# return input of ASPP block + latent features
return x2_down, x3
class TMVANet_Encoder(nn.Module):
"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""
def __init__(self, n_classes, n_frames):
super().__init__()
self.n_classes = n_classes
self.n_frames = n_frames
# Backbone (encoding)
self.rd_encoding_branch = EncodingBranch("range_doppler")
self.ra_encoding_branch = EncodingBranch("range_angle")
self.ad_encoding_branch = EncodingBranch("angle_doppler")
# ASPP Blocks
self.rd_aspp_block = ASPPBlock(in_ch=128, out_ch=128)
self.ra_aspp_block = ASPPBlock(in_ch=128, out_ch=128)
self.ad_aspp_block = ASPPBlock(in_ch=128, out_ch=128)
self.rd_single_conv_block1_1x1 = ConvBlock(
in_ch=640, out_ch=128, k_size=1, pad=0, dil=1
)
self.ra_single_conv_block1_1x1 = ConvBlock(
in_ch=640, out_ch=128, k_size=1, pad=0, dil=1
)
self.ad_single_conv_block1_1x1 = ConvBlock(
in_ch=640, out_ch=128, k_size=1, pad=0, dil=1
)
def forward(self, x_rd, x_ra, x_ad, printshape=False):
# Backbone
ra_features, ra_latent = self.ra_encoding_branch(x_ra)
rd_features, rd_latent = self.rd_encoding_branch(x_rd)
ad_features, ad_latent = self.ad_encoding_branch(x_ad)
# ASPP blocks
x1_rd = self.rd_aspp_block(rd_features)
x1_ra = self.ra_aspp_block(ra_features)
x1_ad = self.ad_aspp_block(ad_features)
x2_rd = self.rd_single_conv_block1_1x1(x1_rd)
x2_ra = self.ra_single_conv_block1_1x1(x1_ra)
x2_ad = self.ad_single_conv_block1_1x1(x1_ad)
# Features join either the RD or the RA branch
x3 = torch.cat((rd_latent, ra_latent, ad_latent), 1)
return x3, x2_rd, x2_ad, x2_ra
class TMVANet_Decoder(nn.Module):
"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""
def __init__(self, n_classes, n_frames):
super().__init__()
self.n_classes = n_classes
self.n_frames = n_frames
# Decoding
self.rd_single_conv_block2_1x1 = ConvBlock(
in_ch=384, out_ch=128, k_size=1, pad=0, dil=1
)
self.ra_single_conv_block2_1x1 = ConvBlock(
in_ch=384, out_ch=128, k_size=1, pad=0, dil=1
)
# Parallel range-Doppler (RD) and range-angle (RA) decoding branches
self.rd_upconv1 = nn.ConvTranspose2d(384, 128, (2, 1), stride=(2, 1))
self.ra_upconv1 = nn.ConvTranspose2d(384, 128, 2, stride=2)
self.rd_double_conv_block1 = DoubleConvBlock(
in_ch=128, out_ch=128, k_size=3, pad=1, dil=1
)
self.ra_double_conv_block1 = DoubleConvBlock(
in_ch=128, out_ch=128, k_size=3, pad=1, dil=1
)
self.rd_upconv2 = nn.ConvTranspose2d(128, 128, (2, 1), stride=(2, 1))
self.ra_upconv2 = nn.ConvTranspose2d(128, 128, 2, stride=2)
self.rd_double_conv_block2 = DoubleConvBlock(
in_ch=128, out_ch=128, k_size=3, pad=1, dil=1
)
self.ra_double_conv_block2 = DoubleConvBlock(
in_ch=128, out_ch=128, k_size=3, pad=1, dil=1
)
# Final 1D convs
self.rd_final = nn.Conv2d(
in_channels=128, out_channels=n_classes, kernel_size=1
)
self.ra_final = nn.Conv2d(
in_channels=128, out_channels=n_classes, kernel_size=1
)
def forward(self, x3, x2_rd, x2_ad, x2_ra):
# Parallel decoding branches with upconvs
# Latent Space
x3_rd = self.rd_single_conv_block2_1x1(x3)
x3_ra = self.ra_single_conv_block2_1x1(x3)
# Latent Space + ASPP features
x4_rd = torch.cat((x2_rd, x3_rd, x2_ad), 1)
x4_ra = torch.cat((x2_ra, x3_ra, x2_ad), 1)
x5_rd = self.rd_upconv1(x4_rd)
x5_ra = self.ra_upconv1(x4_ra)
x6_rd = self.rd_double_conv_block1(x5_rd)
x6_ra = self.ra_double_conv_block1(x5_ra)
x7_rd = self.rd_upconv2(x6_rd)
x7_ra = self.ra_upconv2(x6_ra)
x8_rd = self.rd_double_conv_block2(x7_rd)
x8_ra = self.ra_double_conv_block2(x7_ra)
# Final 1D convolutions
x9_rd = self.rd_final(x8_rd)
x9_ra = self.ra_final(x8_ra)
return x9_rd, x9_ra
class TMVANet(nn.Module):
"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""
def __init__(self, n_classes, n_frames):
super().__init__()
self.n_classes = n_classes
self.n_frames = n_frames
self.encoder = TMVANet_Encoder(n_classes, n_frames)
self.decoder = TMVANet_Decoder(n_classes, n_frames)
def forward(self, x_rd, x_ra, x_ad):
x3, x2_rd, x2_ad, x2_ra = self.encoder(x_rd, x_ra, x_ad)
x9_rd, x9_ra = self.decoder(x3, x2_rd, x2_ad, x2_ra)
return x9_rd, x9_ra
|
packages/pyright-internal/src/tests/samples/typeAlias3.py | sasano8/pyright | 4,391 | 12711327 | # This sample tests that type aliases can consist of
# partially-specialized classes that can be further
# specialized.
# pyright: strict
from typing import Callable, Generic, Literal, Tuple, Optional, TypeVar
from typing_extensions import ParamSpec
T = TypeVar("T")
P = ParamSpec("P")
ValidationResult = Tuple[bool, Optional[T]]
def foo() -> ValidationResult[str]:
return False, "valid"
class ClassA(Generic[T]):
def __new__(cls, value: T) -> "ClassA[T]":
...
TypeAliasA = ClassA[T]
a1 = ClassA(3.0)
t_a1: Literal["ClassA[float]"] = reveal_type(a1)
a2 = TypeAliasA(3.0)
t_a2: Literal["ClassA[float]"] = reveal_type(a2)
Func = Callable[P, T]
AnyFunc = Func[P, int]
AnyFunc[P]
|
crank/net/trainer/__init__.py | abeersaqib/crank | 162 | 12711328 |
from .basetrainer import BaseTrainer # noqa
from .trainer_vqvae import VQVAETrainer # noqa
from .trainer_lsgan import LSGANTrainer # noqa
from .trainer_cyclegan import CycleGANTrainer # noqa
from .trainer_stargan import StarGANTrainer # noqa
from .basetrainer import TrainerWrapper # noqa
|
examples/algorithms/clustering_comparisons.py | rkalahasty/nipy | 236 | 12711375 |
#!/usr/bin/env python3
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Simple demo that partitions a smooth field into 10 clusters. In most cases,
Ward's clustering behaves best.
Requires matplotlib
Author: <NAME>, 2009
"""
print(__doc__)
import numpy as np
import numpy.random as nr
from scipy.ndimage import gaussian_filter
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.algorithms.graph.field import Field
dx = 50
dy = 50
dz = 1
nbseeds = 10
data = gaussian_filter( np.random.randn(dx, dy), 2)
F = Field(dx * dy * dz)
xyz = np.reshape(np.indices((dx, dy, dz)), (3, dx * dy * dz)).T.astype(np.int)
F.from_3d_grid(xyz, 6)
F.set_field(data)
seeds = np.argsort(nr.rand(F.V))[:nbseeds]
seeds, label, J0 = F.geodesic_kmeans(seeds)
wlabel, J1 = F.ward(nbseeds)
seeds, label, J2 = F.geodesic_kmeans(seeds, label=wlabel.copy(), eps=1.e-7)
print('Inertia values for the 3 algorithms: ')
print('Geodesic k-means: ', J0, 'Wards: ', J1, 'Wards + gkm: ', J2)
plt.figure(figsize=(8, 4))
plt.subplot(1, 3, 1)
plt.imshow(np.reshape(data, (dx, dy)), interpolation='nearest')
plt.title('Input data')
plt.subplot(1, 3, 2)
plt.imshow(np.reshape(wlabel, (dx, dy)), interpolation='nearest')
plt.title('Ward clustering \n into 10 components')
plt.subplot(1, 3, 3)
plt.imshow(np.reshape(label, (dx, dy)), interpolation='nearest')
plt.title('geodesic kmeans clust. \n into 10 components')
plt.show()
|
python/args-test.py | honux77/practice | 152 | 12711390 | a = [1, 2, 3, 4, 5,]
print(*a)
for i in a:
print(i, end=' ') |
Chapter09/Python 3.5/classify_image.py | littlealexchen/Deep-Learning-with-TensorFlow-master | 194 | 12711416 |
import tensorflow as tf, sys
# You will be sending the image to be classified as a parameter
provided_image_path = sys.argv[1]
# then we will read the image data
provided_image_data = tf.gfile.FastGFile(provided_image_path, 'rb').read()
# Loads label file
label_lines = [line.rstrip() for line
in tf.gfile.GFile("tensorflow_files/retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("tensorflow_files/retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# pass the provided_image_data as input to the graph
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
network_predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': provided_image_data})
# Sort the result by confidence to show the flower labels accordingly
top_predictions = network_predictions[0].argsort()[-len(network_predictions[0]):][::-1]
for prediction in top_predictions:
flower_type = label_lines[prediction]
score = network_predictions[0][prediction]
print('%s (score = %.5f)' % (flower_type, score))
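# Illustrative invocation (a sketch; the image path is an assumption):
#   python classify_image.py my_flower_photo.jpg
# This prints every label from retrained_labels.txt with its softmax score,
# highest-confidence label first.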
|
tests/guinea-pigs/nose/docstrings/testa.py | djeebus/teamcity-python | 105 | 12711431 | def test_func():
""" My cool test.name """
assert True
|
awdphpspear/protect.py | hillmanyoung/AWD | 146 | 12711432 | # -*- coding:utf-8 -*-
import os
import hashlib
import time
import shutil
def get_file_md5(filename):
m = hashlib.md5()
with open(filename,'rb') as fobj:
while True:
data = fobj.read(4096)
if not data:
break
m.update(data)
return m.hexdigest()
def file_md5_build(startpath):
global md5_list
global file_list
global dir_list
global root
md5_list = []
file_list = []
dir_list = []
for root,dirs,files in os.walk(startpath,topdown=True):
for d in dirs:
dir_list.append(root+'/'+d)
for f in files:
if f[-4:] == '.txt':
continue
file_list.append(root+'/'+f)
md5_list.append(get_file_md5(root+'/'+f))
def file_md5_defense():
file_backup()
global root
file_md5_build('./')
old_list = []
old_dir_list = []
new_list = []
new_dir_list = []
check_list = []
old_file_list = []
new_file_list = []
check_file_list = []
old_file_list = file_list[:]
old_list = md5_list[:]
old_dir_list = dir_list[:]
while (1):
print "*******************************************************"
print '[+]The old file total:',len(old_list)
print '[+]The old dir total:',len(old_dir_list)
print "*******************************************************"
check_list = old_list[:]
check_file_list = old_file_list[:]
file_md5_build('./')
new_list = md5_list[:]
new_file_list = file_list[:]
new_dir_list = dir_list[:]
sign2 = 0
for i in range(len(old_dir_list)):
sign3 = 0
for j in range(len(new_dir_list)):
if (old_dir_list[i] == new_dir_list[j]):
sign3 = 1
break
if sign3 == 0:
sign3 = 1
print old_dir_list[i].replace('./',''),'Disappear!'
try:
shutil.copytree(tgt+old_dir_list[i].replace('./','/'),old_dir_list[i])
print "[+]Repaired."
except:
print "[-]No such dir."
for i in range(len(new_list)):
sign = 0
for j in range(len(old_list)):
if (new_list[i] == old_list[j] and new_file_list[i] == old_file_list[j]):
check_list[j] = '0'
sign = 1
break
if sign == 0:
sign2 = 1
print new_file_list[i].replace('./',''),'Add or Changed!'
try:
os.remove(new_file_list[i])
shutil.copyfile(tgt+new_file_list[i].replace('./','/'),new_file_list[i])
print "[+]Repaired."
except:
print "[-]No such file."
for i in range(len(check_list)):
if check_list[i] != '0' and sign2 != 1:
print check_file_list[i].replace('./',''),'Disappear!'
sign2 = 0
try:
shutil.copyfile(tgt+check_file_list[i].replace('./','/'),check_file_list[i])
print "[+]Repaired."
except:
print "[-]No such file."
print "*******************************************************"
print '[+]Total file:',len(new_list)
print '[+]Total dir:',len(new_dir_list)
print "*******************************************************"
time.sleep(5)
def file_md5_check():
file_backup()
global root
file_md5_build('./')
old_list = []
old_dir_list = []
new_list = []
new_dir_list = []
check_list = []
old_file_list = []
new_file_list = []
check_file_list = []
old_file_list = file_list[:]
old_list = md5_list[:]
old_dir_list = dir_list[:]
while (1):
print "*******************************************************"
print '[+]The old file total:',len(old_list)
print '[+]The old dir total:',len(old_dir_list)
print "*******************************************************"
check_list = old_list[:]
check_file_list = old_file_list[:]
file_md5_build('./')
new_list = md5_list[:]
new_file_list = file_list[:]
new_dir_list = dir_list[:]
sign2 = 0
for i in range(len(old_dir_list)):
sign3 = 0
for j in range(len(new_dir_list)):
if (old_dir_list[i] == new_dir_list[j]):
sign3 = 1
break
if sign3 == 0:
sign3 = 1
print old_dir_list[i].replace('./',''),'Disappear!'
for i in range(len(new_list)):
sign = 0
for j in range(len(old_list)):
if (new_list[i] == old_list[j] and new_file_list[i] == old_file_list[j]):
check_list[j] = '0'
sign = 1
break
if sign == 0:
sign2 = 1
print new_file_list[i].replace('./',''),'Add or Changed!'
for i in range(len(check_list)):
if check_list[i] != '0' and sign2 != 1:
print check_file_list[i].replace('./',''),'Disappear!'
sign2 = 0
print "*******************************************************"
print '[+]Total file:',len(new_list)
print '[+]Total dir:',len(new_dir_list)
print "*******************************************************"
time.sleep(5)
def file_log_add():
php_list=[]
for root,dirs,files in os.walk('./',topdown=True):
for f in files:
if f[-4:] == '.php':
php_list.append(root+'/'+f)
for i in range(len(php_list)):
php_list[i] = php_list[i].replace('//','/')
print php_list[i]
print '[+]Total PHP file:',len(php_list)
confirm = raw_input("Confirm Open Log Monitoring. 1 or 0:")
if confirm == '1':
print "*******************************************************"
for i in range(len(php_list)):
level_dir = 0
for j in range(len(php_list[i])):
if php_list[i][j] == '/':
level_dir += 1
lines = open(php_list[i],"r").readlines()
length = len(lines)-1
for j in range(length):
if '<?php' in lines[j]:
lines[j]=lines[j].replace('<?php','<?php\nrequire_once("./'+'../'*(level_dir-1)+'log.php");')
open(php_list[i],'w').writelines(lines)
print "[+]Log monitoring turned on."
def file_backup():
src = './'
try:
shutil.copytree(src,tgt)
print "[+]File backup succeed."
except:
print "[-]File backup fail.Maybe it exists."
def file_backup_remove():
try:
shutil.rmtree(tgt)
print "[+]File backup remove succeed."
except:
print "[-]File backup remove fail.Maybe it doesn't exist."
global tgt
tgt = './backup' |
main.py | shubhamkumar906/DeepFake-Detection | 223 | 12711433 |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from apex import amp
from data_loader import create_dataloaders
from model import get_trainable_params, create_model, print_model_params
from train import train
from utils import parse_and_override_params
import foundations
# Fix random seed
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
params = foundations.load_parameters()
data_dict = parse_and_override_params(params)
# Set job tags to easily spot data in use
foundations.set_tag(f'{data_dict[params["train_data"]]}: {params["train_data"]}')
# foundations.set_tag(f'big {params["train_data"]}')
print('Creating datasets')
# Get dataloaders
train_dl, val_base_dl, val_augment_dl, display_dl_iter = create_dataloaders(params)
print('Creating loss function')
# Loss function
criterion = nn.CrossEntropyLoss()
print('Creating model')
# Create model, freeze layers and change last layer
model = create_model(bool(params['use_hidden_layer']), params['dropout'])
_ = print_model_params(model)
params_to_update = get_trainable_params(model)
print('Creating optimizer')
# Create optimizer and learning rate schedules
optimizer = optim.Adam(params_to_update, lr=params['max_lr'], weight_decay=params['weight_decay'])
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
# Learning rate scheme
if bool(params['use_lr_scheduler']):
step_size_up = int(params['n_epochs'] * len(train_dl) * 0.3)
step_size_down = params['n_epochs'] * len(train_dl) - step_size_up
scheduler = lr_scheduler.OneCycleLR(optimizer, params['max_lr'], total_steps=None,
epochs=params['n_epochs'], steps_per_epoch=len(train_dl),
pct_start=params['pct_start'], anneal_strategy='cos',
cycle_momentum=False)
else:
scheduler = None
print('Training start..')
# Train
train(train_dl, val_base_dl, val_augment_dl, display_dl_iter, model, optimizer, params['n_epochs'], params['max_lr'], scheduler, criterion,
train_source=params["train_data"])
|
example/validators/with_python/development_settings.py | rroden12/dynaconf | 2,293 | 12711464 |
EXAMPLE = True
MYSQL_HOST = "development.com"
VERSION = 1
AGE = 15
NAME = "MIKE"
IMAGE_1 = "aaa"
IMAGE_2 = "bbb"
IMAGE_4 = "a"
IMAGE_5 = "b"
|
rel-eng/lib/osbsbuilder.py | SalatskySal/atomic-reactor | 113 | 12711477 |
from tito.builder import Builder
class AtomicReactorBuilder(Builder):
def __init__(self, **kwargs):
super(AtomicReactorBuilder, self).__init__(**kwargs)
# tarball has to represent Source0
# but internal structure should remain same
# i.e. {name}-{version} otherwise %setup -q
# will fail
self.tgz_filename = self.display_version + ".tar.gz"
|
cacreader/swig-4.0.2/Examples/test-suite/python/return_const_value_runme.py | kyletanyag/LL-Smartcard | 1,031 | 12711506 | import return_const_value
import sys
p = return_const_value.Foo_ptr_getPtr()
if (p.getVal() != 17):
print "Runtime test1 failed. p.getVal()=", p.getVal()
sys.exit(1)
p = return_const_value.Foo_ptr_getConstPtr()
if (p.getVal() != 17):
print "Runtime test2 failed. p.getVal()=", p.getVal()
sys.exit(1)
|
utils.py | karhankaan/CausalGAN | 119 | 12711577 | from __future__ import print_function
import tensorflow as tf
from functools import partial
import os
from os import listdir
from os.path import isfile, join
import shutil
import sys
from glob import glob
import math
import json
import logging
import numpy as np
from PIL import Image
from datetime import datetime
from tensorflow.core.framework import summary_pb2
def make_summary(name, val):
return summary_pb2.Summary(value=[summary_pb2.Summary.Value(tag=name, simple_value=val)])
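# Illustrative usage (an assumption, not part of the original file): the proto
# returned by make_summary can be written with a TF1-style summary writer, e.g.
#   writer = tf.summary.FileWriter(config.model_dir)
#   writer.add_summary(make_summary('g_loss', 0.42), global_step=step)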
def summary_stats(name,tensor,collections=None,hist=False):
collections=collections or [tf.GraphKeys.SUMMARIES]
ave=tf.reduce_mean(tensor)
std=tf.sqrt(tf.reduce_mean(tf.square(ave-tensor)))
tf.summary.scalar(name+'_ave',ave,collections)
tf.summary.scalar(name+'_std',std,collections)
if hist:
tf.summary.histogram(name+'_hist',tensor,collections)
def prepare_dirs_and_logger(config):
if config.load_path:
strip_lp=config.load_path.strip('./')
if strip_lp.startswith(config.log_dir):
config.model_dir = config.load_path
else:
if config.load_path.startswith(config.dataset):
config.model_name = config.load_path
else:
config.model_name = "{}_{}".format(config.dataset, config.load_path)
else:#new model
config.model_name = "{}_{}".format(config.dataset, get_time())
if config.descrip:
config.model_name+='_'+config.descrip
if not hasattr(config, 'model_dir'):
config.model_dir = os.path.join(config.log_dir, config.model_name)
config.data_path = os.path.join(config.data_dir, config.dataset)
if not config.load_path:
config.log_code_dir=os.path.join(config.model_dir,'code')
for path in [config.log_dir, config.data_dir,
config.model_dir]:
if not os.path.exists(path):
os.makedirs(path)
#Copy python code in directory into model_dir/code for future reference:
#All python files in this directory are copied.
code_dir=os.path.dirname(os.path.realpath(sys.argv[0]))
##additionally, all python files in these directories are also copied. Also symlinks are copied. The idea is to allow easier model loading in the future
allowed_dirs=['causal_controller','causal_began','causal_dcgan','figure_scripts']
#ignore copy of all non-*.py except for these directories
#If you make another folder you want copied, you have to add it here
ignore_these=partial(ignore_except,allowed_dirs=allowed_dirs)
shutil.copytree(code_dir,config.log_code_dir,symlinks=True,ignore=ignore_these)
# model_files = [f for f in listdir(code_dir) if isfile(join(code_dir, f))]
# for f in model_files:
# if f.endswith('.py'):
# shutil.copy2(f,config.log_code_dir)
def ignore_except(src,contents,allowed_dirs):
files=filter(os.path.isfile,contents)
dirs=filter(os.path.isdir,contents)
ignored_files=[f for f in files if not f.endswith('.py')]
ignored_dirs=[d for d in dirs if not d in allowed_dirs]
return ignored_files+ignored_dirs
def get_time():
return datetime.now().strftime("%m%d_%H%M%S")
def save_configs(config,cc_config,dcgan_config,began_config):
model_dir=config.model_dir
print("[*] MODEL dir: %s" % model_dir)
save_config(config)
save_config(cc_config,'cc_params.json',model_dir)
save_config(dcgan_config,'dcgan_params.json',model_dir)
save_config(began_config,'began_params.json',model_dir)
def save_config(config,name="params.json",where=None):
where=where or config.model_dir
param_path = os.path.join(where, name)
print("[*] PARAM path: %s" % param_path)
with open(param_path, 'w') as fp:
json.dump(config.__dict__, fp, indent=4, sort_keys=True)
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type=='GPU']
def distribute_input_data(data_loader,num_gpu):
'''
data_loader is a dictionary of tensors that are fed into our model
This function takes that dictionary of tensors with leading dimension n*batch_size
and breaks it up into n dictionaries with the same keys, whose tensors have
leading dimension batch_size. One dictionary is given to each gpu
'''
if num_gpu==0:
return {'/cpu:0':data_loader}
gpus=get_available_gpus()
if num_gpu > len(gpus):
raise ValueError('number of gpus specified={}, more than gpus available={}'.format(num_gpu,len(gpus)))
gpus=gpus[:num_gpu]
data_by_gpu={g:{} for g in gpus}
for key,value in data_loader.items():
spl_vals=tf.split(value,num_gpu)
for gpu,val in zip(gpus,spl_vals):
data_by_gpu[gpu][key]=val
return data_by_gpu
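# Illustrative sketch (not part of the original code): with num_gpu=2 and
#   data_loader = {'x': <tensor of shape [2 * batch_size, ...]>}
# distribute_input_data(data_loader, 2) returns something like
#   {'/gpu:0': {'x': <first batch_size rows>}, '/gpu:1': {'x': <last batch_size rows>}}
# i.e. one equally sized feed dictionary per available GPU.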
def rank(array):
return len(array.shape)
def make_grid(tensor, nrow=8, padding=2,
normalize=False, scale_each=False):
"""Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py
minor improvement, row/col was reversed"""
nmaps = tensor.shape[0]
ymaps = min(nrow, nmaps)
xmaps = int(math.ceil(float(nmaps) / ymaps))
height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
k = 0
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
h, h_width = y * height + 1 + padding // 2, height - padding
w, w_width = x * width + 1 + padding // 2, width - padding
grid[h:h+h_width, w:w+w_width] = tensor[k]
k = k + 1
return grid
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, scale_each=False):
ndarr = make_grid(tensor, nrow=nrow, padding=padding,
normalize=normalize, scale_each=scale_each)
im = Image.fromarray(ndarr)
im.save(filename)
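# Illustrative usage (assumed shapes): `tensor` is an (N, H, W, 3) uint8 array of
# images; save_image(images, 'samples.png', nrow=8) tiles them into one PNG grid
# with at most 8 rows (see make_grid above for the layout).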
|
tests/ut/cpp/python_input/gtest_input/optimizer/clean_test.py | GuoSuiming/mindspore | 3,200 | 12711599 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@File : opt_clean.py
@Author : wangqiuliang
@Date : 2019-03-18
@Desc : parse python function for ut of erase class
"""
from dataclasses import dataclass
# Test_Erase_class
@dataclass
class Point:
x: float
y: float
def product(self):
return self.x * self.y
def test_erase_class_fn(p_in):
p = Point(p_in)
return p.x * p.y
|
atest/resources/testlibs/cache_error.py | hugovk/SeleniumLibrary | 792 | 12711645 | from robot.libraries.BuiltIn import BuiltIn
def invalidate_driver():
sl = BuiltIn().get_library_instance("SeleniumLibrary")
sl.register_driver(None, "tidii")
sl.register_driver(None, "foobar")
|
nmmo/entity/__init__.py | zhm9484/environment | 230 | 12711652 |
from nmmo.entity.entity import Entity
from nmmo.entity.player import Player
|
search/binary_search/python/binary_search_first_occurrence.py | CarbonDDR/al-go-rithms | 1,253 | 12711653 | def binary_search(arr, item):
low = 0
high = len(arr)-1
result = -1
while (low <= high):
mid = (low + high)//2
if item == arr[mid]:
result = mid
high = mid - 1
elif (item < arr[mid]):
high = mid - 1
else:
low = mid + 1
return result
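# Illustrative usage (a sketch, not part of the original file): on a sorted list
# with duplicates the function returns the index of the FIRST occurrence.
#   binary_search([1, 2, 2, 2, 3], 2)  # -> 1
#   binary_search([1, 2, 2, 2, 3], 4)  # -> -1 (not found)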
|
tests/example_tests/custom_query_strategies.py | simonlevine/modAL | 1,460 | 12711654 |
import numpy as np
from modAL.utils.combination import make_linear_combination, make_product
from modAL.utils.selection import multi_argmax
from modAL.uncertainty import classifier_uncertainty, classifier_margin
from modAL.models import ActiveLearner
from sklearn.datasets import make_blobs
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# generating the data
centers = np.asarray([[-2, 3], [0.5, 5], [1, 1.5]])
X, y = make_blobs(
n_features=2, n_samples=1000, random_state=0, cluster_std=0.7,
centers=centers
)
# initial training data
initial_idx = np.random.choice(range(len(X)), size=20)
X_training, y_training = X[initial_idx], y[initial_idx]
# initializing the learner
learner = ActiveLearner(
estimator=GaussianProcessClassifier(1.0 * RBF(1.0)),
X_training=X_training, y_training=y_training
)
# creating new utility measures by linear combination and product
# linear_combination will return 1.0*classifier_uncertainty + 1.0*classifier_margin
linear_combination = make_linear_combination(
classifier_uncertainty, classifier_margin,
weights=[1.0, 1.0]
)
# product will return (classifier_uncertainty**0.5)*(classifier_margin**0.1)
product = make_product(
classifier_uncertainty, classifier_margin,
exponents=[0.5, 0.1]
)
# defining the custom query strategy, which uses the linear combination of
# classifier uncertainty and classifier margin
def custom_query_strategy(classifier, X, n_instances=1):
utility = linear_combination(classifier, X)
return multi_argmax(utility, n_instances=n_instances)
custom_query_learner = ActiveLearner(
estimator=GaussianProcessClassifier(1.0 * RBF(1.0)),
query_strategy=custom_query_strategy,
X_training=X_training, y_training=y_training
)
# pool-based sampling
n_queries = 20
for idx in range(n_queries):
query_idx, query_instance = custom_query_learner.query(X, n_instances=2)
custom_query_learner.teach(
X=X[query_idx].reshape(-1, 2),
y=y[query_idx].reshape(-1, )
)
|
nodes/0.7.x/python/Roof.KindIsGlazed.py | jdehotin/Clockworkfordynamo | 147 | 12711661 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
items = UnwrapElement(IN[0])
booleans = list()
for item in items:
try:
if item.CurtainGrids:
booleans.append(True)
else:
booleans.append(False)
except:
booleans.append(False)
OUT = booleans |
fastAutoTest/core/wx/wxCommandManager.py | FranciscoShi/FAutoTest | 903 | 12711668 |
# -*- coding: utf-8 -*-
'''
Tencent is pleased to support the open source community by making FAutoTest available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''
from fastAutoTest.core.wx.wxUserAPI import ActionType
from fastAutoTest.core.wx.wxUserAPI import ByType
class WxCommandManager(object):
# "$$" acts as an escape for a literal "$" when the template is formatted
_elementMap = {
ByType.ID: "$$('#$id')[0]",
ByType.NAME: "$$('.$name')[$index]",
ByType.XPATH: "var xpath ='$xpath';"
"xpath_obj = document.evaluate(xpath,document,null, XPathResult.ANY_TYPE, null);"
"var button = xpath_obj.iterateNext()"
}
# Parameters executed by doCommandWithElement
_jsActionMap = {
ActionType.GET_ELEMENT_RECT: ";left=Math.round(button.getBoundingClientRect().left);"
"right=Math.round(button.getBoundingClientRect().right);"
"bottom=Math.round(button.getBoundingClientRect().bottom);"
"topp=Math.round(button.getBoundingClientRect().top);"
"x=Math.round((left+right)/2);"
"y=Math.round((topp+bottom)/2);",
ActionType.IS_ELEMENT_EXIST: ";button",
ActionType.GET_ELEMENT_TEXT: ";button.textContent;",
ActionType.GET_ELEMENT_SRC: ";button.getAttribute('src')",
}
_methodMap = {
ActionType.GET_DOCUMENT: "DOM.getDocument",
ActionType.GET_HTML: "DOM.getOuterHTML",
ActionType.SCROLL: "Input.synthesizeScrollGesture",
ActionType.CLICK: "Input.synthesizeTapGesture",
ActionType.GET_ELEMENT_RECT: "Runtime.evaluate",
ActionType.GET_PICKER_RECT: "Runtime.evaluate",
ActionType.GET_ELEMENT_TEXT: "Runtime.evaluate",
ActionType.GET_ELEMENT_SRC: "Runtime.evaluate",
ActionType.GET_PAGE_HEIGHT: "Runtime.evaluate",
ActionType.GET_JS_VALUE: "Runtime.evaluate",
ActionType.TEXT: "Input.dispatchKeyEvent",
ActionType.IS_ELEMENT_EXIST: "Runtime.evaluate",
ActionType.GET_WINDOW_HEIGHT: "Runtime.evaluate",
ActionType.GET_WINDOW_WIDTH: "Runtime.evaluate"
}
# string.Template
# Templates for the final payload concatenated by jsonConcat
_paramsMap = {
"Runtime.evaluate": '{"expression": "$expression"}',
"Input.synthesizeScrollGesture":
'{"type": "mouseWheel", "x": $x, "y": $y,"xDistance": $xDistance, "yDistance": $yDistance,"speed":$speed}',
"Page.navigate": '{"url":"$url"}',
"Input.dispatchKeyEvent": '{"type":"$type","text":"$text","unmodifiedText":"$text"}',
"Input.synthesizeTapGesture": '{"x":$x,"y":$y}',
"DOM.getDocument": "{''}",
"DOM.getOuterHTML": '{"nodeId": $nodeId}',
}
# Parameters executed by doCommandWithoutElement
_expressionMap = {
ActionType.GET_PAGE_HEIGHT: 'document.body.scrollHeight',
ActionType.GET_JS_VALUE: '$value',
ActionType.GET_WINDOW_HEIGHT: 'document.documentElement.clientHeight',
ActionType.GET_WINDOW_WIDTH: "document.documentElement.clientWidth"
}
def getElement(self, actionType, default=None):
return self._elementMap.get(actionType, default)
def getJsAction(self, actionType, default=None):
return self._jsActionMap.get(actionType, default)
def getMethod(self, actionType, default=None):
return self._methodMap.get(actionType, default)
def getParams(self, actionType, default=None):
return self._paramsMap.get(actionType, default)
def getExpression(self, actionType, default=None):
return self._expressionMap.get(actionType, default)
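# Illustrative sketch (an assumption about how these templates are consumed, not
# taken from this file): the "$"-placeholders above match string.Template syntax,
# e.g.
#   from string import Template
#   js = Template(WxCommandManager().getElement(ByType.ID)).substitute(id="login_btn")
#   # -> "$('#login_btn')[0]"   ("$$" escapes the literal "$")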
|
seahub/api2/endpoints/ocm.py | weimens/seahub | 420 | 12711689 |
import logging
import random
import string
import requests
import json
from constance import config
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seaserv import seafile_api
from seahub.utils.repo import get_available_repo_perms, get_repo_owner
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.constants import PERMISSION_READ, PERMISSION_READ_WRITE
from seahub.ocm.models import OCMShareReceived, OCMShare
from seahub.ocm.settings import ENABLE_OCM, SUPPORTED_OCM_PROTOCOLS, \
OCM_SEAFILE_PROTOCOL, OCM_RESOURCE_TYPE_LIBRARY, OCM_API_VERSION, \
OCM_SHARE_TYPES, OCM_ENDPOINT, OCM_PROVIDER_ID, OCM_NOTIFICATION_TYPE_LIST, \
OCM_NOTIFICATION_SHARE_UNSHARED, OCM_NOTIFICATION_SHARE_DECLINED, OCM_PROTOCOL_URL, \
OCM_NOTIFICATION_URL, OCM_CREATE_SHARE_URL, OCM_REMOTE_SERVERS
logger = logging.getLogger(__name__)
# Convert seafile permission to ocm protocol standard permission
SEAFILE_PERMISSION2OCM_PERMISSION = {
PERMISSION_READ: ['read'],
PERMISSION_READ_WRITE: ['read', 'write'],
}
def get_server_name_by_url(url):
for name_domain_dict in OCM_REMOTE_SERVERS:
if name_domain_dict['server_url'] == url:
return name_domain_dict['server_name']
def gen_shared_secret(length=23):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(length))
def get_remote_protocol(url):
response = requests.get(url)
return json.loads(response.text)
def is_valid_url(url):
if not url.startswith('https://') and not url.startswith('http://'):
return False
if not url.endswith('/'):
return False
return True
def check_url_slash(url):
if not url.endswith('/'):
url += '/'
return url
class OCMProtocolView(APIView):
throttle_classes = (UserRateThrottle,)
def get(self, request):
"""
return ocm protocol info to remote server
"""
# TODO
# currently if ENABLE_OCM is False, return 404 as if ocm protocol is not implemented
# ocm protocol is not clear about this, https://github.com/GEANT/OCM-API/pull/37
if not ENABLE_OCM:
error_msg = 'feature not enabled.'
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
result = {
'enabled': True,
'apiVersion': OCM_API_VERSION,
'endPoint': config.SERVICE_URL + '/' + OCM_ENDPOINT,
'resourceTypes': {
'name': OCM_RESOURCE_TYPE_LIBRARY,
'shareTypes': OCM_SHARE_TYPES,
'protocols': {
OCM_SEAFILE_PROTOCOL: OCM_SEAFILE_PROTOCOL,
}
}
}
return Response(result)
class OCMSharesView(APIView):
throttle_classes = (UserRateThrottle,)
def post(self, request):
"""
create ocm in consumer server
"""
# argument check
share_with = request.data.get('shareWith', '')
if not share_with:
error_msg = 'shareWith invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# currently only supports repo share
repo_name = request.data.get('name', '')
if not repo_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
sender = request.data.get('sender', '')
if not sender:
error_msg = 'sender invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
share_type = request.data.get('shareType', '')
if share_type not in OCM_SHARE_TYPES:
error_msg = 'shareType %s invalid.' % share_type
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
resource_type = request.data.get('resourceType', '')
if resource_type != OCM_RESOURCE_TYPE_LIBRARY:
error_msg = 'resourceType %s invalid.' % resource_type
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
provider_id = request.data.get('providerId', '')
if not provider_id:
error_msg = 'providerId invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
"""
other ocm protocol fields currently not used
description = request.data.get('description', '')
owner = request.data.get('owner', '')
ownerDisplayName = request.data.get('ownerDisplayName', '')
senderDisplayName = request.data.get('senderDisplayName', '')
"""
protocol = request.data.get('protocol', '')
if not protocol:
error_msg = 'protocol invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if 'name' not in protocol.keys():
error_msg = 'protocol.name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if protocol['name'] not in SUPPORTED_OCM_PROTOCOLS:
error_msg = 'protocol %s not support.' % protocol['name']
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if 'options' not in protocol.keys():
error_msg = 'protocol.options invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if 'sharedSecret' not in protocol['options'].keys():
error_msg = 'protocol.options.sharedSecret invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if 'permissions' not in protocol['options'].keys():
error_msg = 'protocol.options.permissions invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if protocol['name'] == OCM_SEAFILE_PROTOCOL:
if 'repoId' not in protocol['options'].keys():
error_msg = 'protocol.options.repoId invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if 'seafileServiceURL' not in protocol['options'].keys():
error_msg = 'protocol.options.seafileServiceURL invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if protocol['name'] == OCM_SEAFILE_PROTOCOL:
shared_secret = protocol['options']['sharedSecret']
permissions = protocol['options']['permissions']
repo_id = protocol['options']['repoId']
from_server_url = protocol['options']['seafileServiceURL']
if OCMShareReceived.objects.filter(
from_user=sender,
to_user=share_with,
from_server_url=from_server_url,
repo_id=repo_id,
repo_name=repo_name,
provider_id=provider_id,
).exists():
return api_error(status.HTTP_400_BAD_REQUEST, 'same share already exists.')
if 'write' in permissions:
permission = PERMISSION_READ_WRITE
else:
permission = PERMISSION_READ
OCMShareReceived.objects.add(
shared_secret=shared_secret,
from_user=sender,
to_user=share_with,
from_server_url=from_server_url,
repo_id=repo_id,
repo_name=repo_name,
permission=permission,
provider_id=provider_id,
)
return Response(request.data, status=status.HTTP_201_CREATED)
class OCMNotificationsView(APIView):
throttle_classes = (UserRateThrottle,)
def post(self, request):
""" Handle notifications from remote server
"""
notification_type = request.data.get('notificationType', '')
if not notification_type:
error_msg = 'notificationType %s invalid.' % notification_type
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if notification_type not in OCM_NOTIFICATION_TYPE_LIST:
error_msg = 'notificationType %s not supportd.' % notification_type
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
resource_type = request.data.get('resourceType', '')
if resource_type != OCM_RESOURCE_TYPE_LIBRARY:
error_msg = 'resourceType %s invalid.' % resource_type
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
notification = request.data.get('notification', '')
if not notification:
error_msg = 'notification invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
shared_secret = notification.get('sharedSecret', '')
if not shared_secret:
error_msg = 'sharedSecret invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if notification_type == OCM_NOTIFICATION_SHARE_UNSHARED:
"""
Provider unshared, then delete ocm_share_received record on Consumer
"""
try:
ocm_share_received = OCMShareReceived.objects.get(shared_secret=shared_secret)
except OCMShareReceived.DoesNotExist:
return Response(request.data)
if ocm_share_received:
try:
ocm_share_received.delete()
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
elif notification_type == OCM_NOTIFICATION_SHARE_DECLINED:
"""
Consumer declined share, then delete ocm_share record on Provider
"""
try:
ocm_share = OCMShare.objects.get(shared_secret=shared_secret)
except OCMShareReceived.DoesNotExist:
return Response(request.data)
if ocm_share:
try:
ocm_share.delete()
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
return Response(request.data)
class OCMSharesPrepareView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request):
"""
list ocm shares of the request user, filtered by repo_id
"""
repo_id = request.GET.get('repo_id', '')
if repo_id:
ocm_shares = OCMShare.objects.filter(repo_id=repo_id, from_user=request.user.username)
else:
ocm_shares = OCMShare.objects.filter(from_user=request.user.username)
ocm_share_list = []
for ocm_share in ocm_shares:
ocm_info = ocm_share.to_dict()
ocm_info['to_server_name'] = get_server_name_by_url(ocm_share.to_server_url)
ocm_share_list.append(ocm_info)
return Response({'ocm_share_list': ocm_share_list})
def post(self, request):
"""
prepare provider server info for ocm, and send a post request to the consumer
three steps:
1. send a get request to the remote server, ask if it supports ocm, and get other info
2. send a post request to the remote server, which creates a record in its remote
ocm_share_received table
3. store a record in the local ocm_share table
"""
# argument check
to_user = request.data.get('to_user', '')
if not to_user:
error_msg = 'to_user invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
to_server_url = request.data.get('to_server_url', '').lower().strip()
if not to_server_url or not is_valid_url(to_server_url):
error_msg = 'to_server_url %s invalid.' % to_server_url
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id = request.data.get('repo_id', '')
if not repo_id:
error_msg = 'repo_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo = seafile_api.get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library %s not found.' % repo_id)
path = request.data.get('path', '/')
# TODO
# 1. folder check
# 2. encrypted repo check
#
# if seafile_api.get_dir_id_by_path(repo.id, path) is None:
# return api_error(status.HTTP_404_NOT_FOUND, 'Folder %s not found.' % path)
#
# if repo.encrypted and path != '/':
# return api_error(status.HTTP_400_BAD_REQUEST, 'Folder invalid.')
permission = request.data.get('permission', PERMISSION_READ)
if permission not in get_available_repo_perms():
return api_error(status.HTTP_400_BAD_REQUEST, 'permission invalid.')
username = request.user.username
repo_owner = get_repo_owner(request, repo_id)
if repo_owner != username:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
if OCMShare.objects.filter(
from_user=request.user.username,
to_user=to_user,
to_server_url=to_server_url,
repo_id=repo_id,
repo_name=repo.repo_name,
path=path,
).exists():
return api_error(status.HTTP_400_BAD_REQUEST, 'same share already exists.')
consumer_protocol = get_remote_protocol(to_server_url + OCM_PROTOCOL_URL)
shared_secret = gen_shared_secret()
from_user = username
post_data = {
'shareWith': to_user,
'name': repo.repo_name,
'description': '',
'providerId': OCM_PROVIDER_ID,
'owner': repo_owner,
'sender': from_user,
'ownerDisplayName': email2nickname(repo_owner),
'senderDisplayName': email2nickname(from_user),
'shareType': consumer_protocol['resourceTypes']['shareTypes'][0], # currently only support user type
'resourceType': consumer_protocol['resourceTypes']['name'], # currently only support repo
'protocol': {
'name': OCM_SEAFILE_PROTOCOL,
'options': {
'sharedSecret': shared_secret,
'permissions': SEAFILE_PERMISSION2OCM_PERMISSION[permission],
'repoId': repo_id,
'seafileServiceURL': check_url_slash(config.SERVICE_URL),
},
},
}
url = consumer_protocol['endPoint'] + OCM_CREATE_SHARE_URL
try:
requests.post(url, json=post_data)
except Exception as e:
logging.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
ocm_share = OCMShare.objects.add(
shared_secret=shared_secret,
from_user=request.user.username,
to_user=to_user,
to_server_url=to_server_url,
repo_id=repo_id,
repo_name=repo.repo_name,
path=path,
permission=permission,
)
ocm_info = ocm_share.to_dict()
ocm_info['to_server_name'] = get_server_name_by_url(ocm_share.to_server_url)
return Response(ocm_info)
class OCMSharePrepareView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def delete(self, request, pk):
"""
delete an ocm share record
"""
try:
ocm_share = OCMShare.objects.get(pk=pk)
except OCMShareReceived.DoesNotExist:
error_msg = 'OCMShare %s not found.' % pk
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if ocm_share.from_user != request.user.username:
error_msg = 'permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
to_server_url = ocm_share.to_server_url
shared_secret = ocm_share.shared_secret
consumer_protocol = get_remote_protocol(to_server_url + OCM_PROTOCOL_URL)
# send unshare notification to consumer
post_data = {
'notificationType': OCM_NOTIFICATION_SHARE_UNSHARED,
'resourceType': OCM_RESOURCE_TYPE_LIBRARY,
'providerId': OCM_PROVIDER_ID,
'notification': {
'sharedSecret': shared_secret,
'message': '',
},
}
url = consumer_protocol['endPoint'] + OCM_NOTIFICATION_URL
try:
requests.post(url, json=post_data)
except Exception as e:
logging.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
try:
ocm_share.delete()
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
return Response({'success': True})
class OCMSharesReceivedView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request):
"""
list ocm shares received
"""
ocm_share_received_list = []
ocm_shares_received = OCMShareReceived.objects.filter(to_user=request.user.username)
for ocm_share_received in ocm_shares_received:
ocm_share_received_list.append(ocm_share_received.to_dict())
return Response({'ocm_share_received_list': ocm_share_received_list})
class OCMShareReceivedView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def delete(self, request, pk):
"""
delete a share received record
"""
try:
ocm_share_received = OCMShareReceived.objects.get(pk=pk)
except OCMShareReceived.DoesNotExist:
error_msg = 'OCMShareReceived %s not found.' % pk
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
if ocm_share_received.to_user != request.user.username:
error_msg = 'permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
from_server_url = ocm_share_received.from_server_url
shared_secret = ocm_share_received.shared_secret
provider_protocol = get_remote_protocol(from_server_url + OCM_PROTOCOL_URL)
# send share-declined notification to the provider
post_data = {
'notificationType': OCM_NOTIFICATION_SHARE_DECLINED,
'resourceType': OCM_RESOURCE_TYPE_LIBRARY,
'providerId': OCM_PROVIDER_ID,
'notification': {
'sharedSecret': shared_secret,
'message': '',
},
}
url = provider_protocol['endPoint'] + OCM_NOTIFICATION_URL
try:
requests.post(url, json=post_data)
except Exception as e:
logging.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
try:
ocm_share_received.delete()
except Exception as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
return Response({'success': True})
|
lib/datasets/factory.py | LeiYangJustin/UnseenObjectClustering | 101 | 12711702 | # Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Factory method for easily getting imdbs by name."""
__sets = {}
import datasets.tabletop_object
import datasets.osd_object
import datasets.ocid_object
import numpy as np
# tabletop object dataset
for split in ['train', 'test', 'all']:
name = 'tabletop_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.TableTopObject(split))
# OSD object dataset
for split in ['test']:
name = 'osd_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.OSDObject(split))
# OCID object dataset
for split in ['test']:
name = 'ocid_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.OCIDObject(split))
def get_dataset(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_datasets():
"""List all registered imdbs."""
return __sets.keys()
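# Hedged usage sketch (added for illustration, not part of the upstream file):
# datasets registered above are looked up by name; the name below is one of the
# keys built in the registration loops, and instantiating it assumes the
# corresponding data is available on disk.
if __name__ == '__main__':
    print(sorted(list_datasets()))
    # dataset = get_dataset('tabletop_object_train')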
|
src/beanmachine/ppl/compiler/tests/tutorial_GMM_with_1_dimensions_and_4_components_test.py | feynmanliang/beanmachine | 177 | 12711708 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for 1D GMM with K > 2 number of components"""
import logging
import unittest
# Comments after imports suggest alternative comment style (for original tutorial)
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
# This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(42)
# Model
class GaussianMixtureModel(object):
def __init__(self, k):
self.K = k
@bm.random_variable
def alpha(self, k):
return dist.Dirichlet(5 * torch.ones(k))
@bm.random_variable
def mu(self, c):
return dist.Normal(0, 10)
@bm.random_variable
def sigma(self, c):
return dist.Gamma(1, 10)
@bm.random_variable
def component(self, i):
alpha = self.alpha(self.K)
return dist.Categorical(alpha)
@bm.random_variable
def y(self, i):
c = self.component(i)
return dist.Normal(self.mu(c), self.sigma(c))
# Creating sample data
n = 6 # num observations
k = 4 # true number of clusters
gmm = GaussianMixtureModel(k=k)
ground_truth = {
**{
gmm.alpha(k): torch.ones(k) * 1.0 / k,
},
**{gmm.mu(i): tensor(i % 2).float() for i in range(k)},
**{gmm.sigma(i): tensor(0.1) for i in range(k)},
**{gmm.component(i): tensor(i % k).float() for i in range(n)},
}
# [Visualization code in tutorial skipped]
# Inference parameters
num_samples = (
1 ###00 Sample size should not affect (the ability to find) compilation issues.
)
queries = (
[gmm.alpha(gmm.K)]
+ [gmm.component(j) for j in range(n)]
+ [gmm.mu(i) for i in range(k)]
+ [gmm.sigma(i) for i in range(k)]
)
observations = {
gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())]
for i in range(n)
}
class tutorialGMMwith1DimensionsAnd4Components(unittest.TestCase):
def test_tutorial_GMM_with_1_dimensions_and_4_components(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
torch.manual_seed(
42
) # Note: Second time we seed. Could be a good tutorial style
mh = bm.CompositionalInference({...: bm.SingleSiteNewtonianMonteCarlo()})
mh.infer(
queries,
observations,
num_samples=num_samples,
num_chains=1,
)
self.assertTrue(True, msg="We just want to check this point is reached")
def test_tutorial_GMM_with_1_dimensions_and_4_components_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """digraph "graph" {
N00[label="[5.0,5.0,5.0,5.0]"];
N01[label=Dirichlet];
N02[label=Sample];
N03[label=Categorical];
N04[label=Sample];
N05[label=0.0];
N06[label=10.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label=Sample];
N11[label=Sample];
N12[label=1.0];
N13[label=Gamma];
N14[label=Sample];
N15[label=Sample];
N16[label=Sample];
N17[label=Sample];
N18[label=Choice];
N19[label=Choice];
N20[label=Normal];
N21[label=Sample];
N22[label="Observation 0.0"];
N23[label=Sample];
N24[label=Choice];
N25[label=Choice];
N26[label=Normal];
N27[label=Sample];
N28[label="Observation 1.0"];
N29[label=Sample];
N30[label=Choice];
N31[label=Choice];
N32[label=Normal];
N33[label=Sample];
N34[label="Observation 0.0"];
N35[label=Sample];
N36[label=Choice];
N37[label=Choice];
N38[label=Normal];
N39[label=Sample];
N40[label="Observation 1.0"];
N41[label=Sample];
N42[label=Choice];
N43[label=Choice];
N44[label=Normal];
N45[label=Sample];
N46[label="Observation 0.0"];
N47[label=Sample];
N48[label=Choice];
N49[label=Choice];
N50[label=Normal];
N51[label=Sample];
N52[label="Observation 1.0"];
N53[label=Query];
N54[label=Query];
N55[label=Query];
N56[label=Query];
N57[label=Query];
N58[label=Query];
N59[label=Query];
N60[label=Query];
N61[label=Query];
N62[label=Query];
N63[label=Query];
N64[label=Query];
N65[label=Query];
N66[label=Query];
N67[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N03;
N02 -> N53;
N03 -> N04;
N03 -> N23;
N03 -> N29;
N03 -> N35;
N03 -> N41;
N03 -> N47;
N04 -> N18;
N04 -> N19;
N04 -> N54;
N05 -> N07;
N06 -> N07;
N06 -> N13;
N07 -> N08;
N07 -> N09;
N07 -> N10;
N07 -> N11;
N08 -> N18;
N08 -> N24;
N08 -> N30;
N08 -> N36;
N08 -> N42;
N08 -> N48;
N08 -> N60;
N09 -> N18;
N09 -> N24;
N09 -> N30;
N09 -> N36;
N09 -> N42;
N09 -> N48;
N09 -> N61;
N10 -> N18;
N10 -> N24;
N10 -> N30;
N10 -> N36;
N10 -> N42;
N10 -> N48;
N10 -> N62;
N11 -> N18;
N11 -> N24;
N11 -> N30;
N11 -> N36;
N11 -> N42;
N11 -> N48;
N11 -> N63;
N12 -> N13;
N13 -> N14;
N13 -> N15;
N13 -> N16;
N13 -> N17;
N14 -> N19;
N14 -> N25;
N14 -> N31;
N14 -> N37;
N14 -> N43;
N14 -> N49;
N14 -> N64;
N15 -> N19;
N15 -> N25;
N15 -> N31;
N15 -> N37;
N15 -> N43;
N15 -> N49;
N15 -> N65;
N16 -> N19;
N16 -> N25;
N16 -> N31;
N16 -> N37;
N16 -> N43;
N16 -> N49;
N16 -> N66;
N17 -> N19;
N17 -> N25;
N17 -> N31;
N17 -> N37;
N17 -> N43;
N17 -> N49;
N17 -> N67;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N23 -> N24;
N23 -> N25;
N23 -> N55;
N24 -> N26;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N29 -> N30;
N29 -> N31;
N29 -> N56;
N30 -> N32;
N31 -> N32;
N32 -> N33;
N33 -> N34;
N35 -> N36;
N35 -> N37;
N35 -> N57;
N36 -> N38;
N37 -> N38;
N38 -> N39;
N39 -> N40;
N41 -> N42;
N41 -> N43;
N41 -> N58;
N42 -> N44;
N43 -> N44;
N44 -> N45;
N45 -> N46;
N47 -> N48;
N47 -> N49;
N47 -> N59;
N48 -> N50;
N49 -> N50;
N50 -> N51;
N51 -> N52;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """graph::Graph g;
Eigen::MatrixXd m0(4, 1)
m0 << 5.0, 5.0, 5.0, 5.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint n1 = g.add_distribution(
graph::DistributionType::DIRICHLET,
graph::ValueType(
graph::VariableType::COL_SIMPLEX_MATRIX,
graph::AtomicType::PROBABILITY,
4,
1
),
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_distribution(
graph::DistributionType::CATEGORICAL,
graph::AtomicType::NATURAL,
std::vector<uint>({n2}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n5 = g.add_constant(0.0);
uint n6 = g.add_constant_pos_real(10.0);
uint n7 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n5, n6}));
uint n8 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n9 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n10 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n11 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n12 = g.add_constant_pos_real(1.0);
uint n13 = g.add_distribution(
graph::DistributionType::GAMMA,
graph::AtomicType::POS_REAL,
std::vector<uint>({n12, n6}));
uint n14 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n15 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n16 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n17 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n18 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n8, n9, n10, n11}));
uint n19 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n14, n15, n16, n17}));
uint n20 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n18, n19}));
uint n21 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n20}));
g.observe([n21], 0.0);
uint n22 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n23 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n8, n9, n10, n11}));
uint n24 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n14, n15, n16, n17}));
uint n25 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n23, n24}));
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n25}));
g.observe([n26], 1.0);
uint n27 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n28 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n8, n9, n10, n11}));
uint n29 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n14, n15, n16, n17}));
uint n30 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n28, n29}));
uint n31 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n30}));
g.observe([n31], 0.0);
uint n32 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n33 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n8, n9, n10, n11}));
uint n34 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n14, n15, n16, n17}));
uint n35 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n33, n34}));
uint n36 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n35}));
g.observe([n36], 1.0);
uint n37 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n38 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n8, n9, n10, n11}));
uint n39 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n14, n15, n16, n17}));
uint n40 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n38, n39}));
uint n41 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n40}));
g.observe([n41], 0.0);
uint n42 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n43 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n8, n9, n10, n11}));
uint n44 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n14, n15, n16, n17}));
uint n45 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n43, n44}));
uint n46 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n45}));
g.observe([n46], 1.0);
uint q0 = g.query(n2);
uint q1 = g.query(n4);
uint q2 = g.query(n22);
uint q3 = g.query(n27);
uint q4 = g.query(n32);
uint q5 = g.query(n37);
uint q6 = g.query(n42);
uint q7 = g.query(n8);
uint q8 = g.query(n9);
uint q9 = g.query(n10);
uint q10 = g.query(n11);
uint q11 = g.query(n14);
uint q12 = g.query(n15);
uint q13 = g.query(n16);
uint q14 = g.query(n17);
"""
self.assertEqual(expected.strip(), observed.strip())
|
Python/FactorialOfNumbers.py | OluSure/Hacktoberfest2021-1 | 215 | 12711717 | <filename>Python/FactorialOfNumbers.py<gh_stars>100-1000
for i in range(int(input())):
fact=1
a=int(input())
for j in range(1,a+1,1):
fact=fact*j
print(fact) |
become_yukarin/data_struct.py | nameless-writer/become-yukarin | 562 | 12711738 | <gh_stars>100-1000
from typing import NamedTuple, Dict, List
import numpy
import pyworld
_min_mc = -18.3
class Wave(NamedTuple):
wave: numpy.ndarray
sampling_rate: int
class AcousticFeature(NamedTuple):
f0: numpy.ndarray = numpy.nan
spectrogram: numpy.ndarray = numpy.nan
aperiodicity: numpy.ndarray = numpy.nan
mfcc: numpy.ndarray = numpy.nan
voiced: numpy.ndarray = numpy.nan
@staticmethod
def dtypes():
return dict(
f0=numpy.float32,
spectrogram=numpy.float32,
aperiodicity=numpy.float32,
mfcc=numpy.float32,
voiced=numpy.bool,
)
def astype(self, dtype):
return AcousticFeature(
f0=self.f0.astype(dtype),
spectrogram=self.spectrogram.astype(dtype),
aperiodicity=self.aperiodicity.astype(dtype),
mfcc=self.mfcc.astype(dtype),
voiced=self.voiced.astype(dtype),
)
def astype_only_float(self, dtype):
return AcousticFeature(
f0=self.f0.astype(dtype),
spectrogram=self.spectrogram.astype(dtype),
aperiodicity=self.aperiodicity.astype(dtype),
mfcc=self.mfcc.astype(dtype),
voiced=self.voiced,
)
def validate(self):
assert self.f0.ndim == 2
assert self.spectrogram.ndim == 2
assert self.aperiodicity.ndim == 2
assert self.mfcc.ndim == 2
assert self.voiced.ndim == 2
len_time = len(self.f0)
assert len(self.spectrogram) == len_time
assert len(self.aperiodicity) == len_time
assert len(self.mfcc) == len_time
assert len(self.voiced) == len_time
assert self.voiced.dtype == numpy.bool
@staticmethod
def silent(length: int, sizes: Dict[str, int], keys: List[str]):
d = {}
if 'f0' in keys:
d['f0'] = numpy.zeros((length, sizes['f0']), dtype=AcousticFeature.dtypes()['f0'])
if 'spectrogram' in keys:
d['spectrogram'] = numpy.zeros((length, sizes['spectrogram']),
dtype=AcousticFeature.dtypes()['spectrogram'])
if 'aperiodicity' in keys:
d['aperiodicity'] = numpy.zeros((length, sizes['aperiodicity']),
dtype=AcousticFeature.dtypes()['aperiodicity'])
if 'mfcc' in keys:
d['mfcc'] = numpy.hstack((
numpy.ones((length, 1), dtype=AcousticFeature.dtypes()['mfcc']) * _min_mc,
numpy.zeros((length, sizes['mfcc'] - 1), dtype=AcousticFeature.dtypes()['mfcc'])
))
if 'voiced' in keys:
d['voiced'] = numpy.zeros((length, sizes['voiced']), dtype=AcousticFeature.dtypes()['voiced'])
feature = AcousticFeature(**d)
return feature
@staticmethod
def concatenate(fs: List['AcousticFeature'], keys: List[str]):
is_target = lambda a: not numpy.any(numpy.isnan(a))
return AcousticFeature(**{
key: numpy.concatenate([getattr(f, key) for f in fs]) if is_target(getattr(fs[0], key)) else numpy.nan
for key in keys
})
def pick(self, first: int, last: int):
is_target = lambda a: not numpy.any(numpy.isnan(a))
return AcousticFeature(
f0=self.f0[first:last] if is_target(self.f0) else numpy.nan,
spectrogram=self.spectrogram[first:last] if is_target(self.spectrogram) else numpy.nan,
aperiodicity=self.aperiodicity[first:last] if is_target(self.aperiodicity) else numpy.nan,
mfcc=self.mfcc[first:last] if is_target(self.mfcc) else numpy.nan,
voiced=self.voiced[first:last] if is_target(self.voiced) else numpy.nan,
)
@staticmethod
def get_sizes(sampling_rate: int, order: int):
fft_size = pyworld.get_cheaptrick_fft_size(fs=sampling_rate)
return dict(
f0=1,
spectrogram=fft_size // 2 + 1,
aperiodicity=fft_size // 2 + 1,
mfcc=order + 1,
voiced=1,
)
class LowHighSpectrogramFeature(NamedTuple):
low: numpy.ndarray
high: numpy.ndarray
def validate(self):
assert self.low.ndim == 2
assert self.high.ndim == 2
assert self.low.shape == self.high.shape
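# Hedged usage sketch (added for illustration, not part of the upstream module):
# build an all-silent AcousticFeature whose per-frame sizes are derived from an
# illustrative sampling rate and mel-cepstrum order, then check its invariants.
def _example_silent_feature():
    sizes = AcousticFeature.get_sizes(sampling_rate=24000, order=59)
    keys = ['f0', 'spectrogram', 'aperiodicity', 'mfcc', 'voiced']
    feature = AcousticFeature.silent(length=100, sizes=sizes, keys=keys)
    feature.validate()  # all arrays are 2-D and share the same time length
    return feature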
|
transcrypt/modules/org/reactjs/__init__.py | kochelmonster/Transcrypt | 2,200 | 12711743 | createElement = React.createElement
createContext = React.createContext
forwardRef = React.forwardRef
Component = ReactComponent = React.Component
useState = React.useState
useEffect = React.useEffect
useContext = React.useContext
useReducer = React.useReducer
useCallback = React.useCallback
useMemo = React.useMemo
useRef = React.useRef
useImperativeHandle = React.useImperativeHandle
useLayoutEffect = React.useLayoutEffect
useDebugValue = React.useDebugValue
def withDeps(*deps):
useHook = this
def decorator(fn):
useHook(fn, deps)
return fn
return decorator
useEffect.withDeps = withDeps
useLayoutEffect.withDeps = withDeps
def useCallbackWithDeps(*deps):
def decorator(fn):
return React.useCallback(fn, deps)
return decorator
useCallback.withDeps = useCallbackWithDeps
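# Hedged usage sketch (Transcrypt-only, added for illustration, not part of the
# upstream module): the withDeps helpers above let the dependency array be given
# as decorator arguments.  'ExampleCounter', 'count' and 'setCount' are
# illustrative names; 'console' and 'React' are supplied by the JavaScript
# runtime once the module is compiled.
def ExampleCounter(props):
    count, setCount = useState(0)
    @useEffect.withDeps(count)
    def log_count():
        console.log('count changed to', count)
    return createElement('button', {'onClick': lambda: setCount(count + 1)}, count)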
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotLoadCommonAttributes_t.py | htlcnn/ironpython-stubs | 182 | 12711744 | <reponame>htlcnn/ironpython-stubs<filename>release/stubs.min/Tekla/Structures/ModelInternal_parts/dotLoadCommonAttributes_t.py
class dotLoadCommonAttributes_t(object):
# no doc
aPartFilter=None
AutomaticPrimaryAxisWeight=None
BoundingBoxDx=None
BoundingBoxDy=None
BoundingBoxDz=None
CreateFixedSupportConditionsAutomatically=None
FatherId=None
LoadAttachment=None
LoadDispersionAngle=None
LoadGroupId=None
ModelObject=None
PartNames=None
PrimaryAxisDirection=None
Spanning=None
Weight=None
|
tests/isolated/patcher_importlib_lock.py | li-caspar/eventlet_0.30.2 | 5,079 | 12711790 | <filename>tests/isolated/patcher_importlib_lock.py
__test__ = False
def do_import():
import encodings.idna
if __name__ == '__main__':
import sys
import eventlet
eventlet.monkey_patch()
threading = eventlet.patcher.original('threading')
sys.modules.pop('encodings.idna', None)
# call "import encodings.idna" in a new thread
thread = threading.Thread(target=do_import)
thread.start()
# call "import encodings.idna" in the main thread
do_import()
thread.join()
print('pass')
|
examples_allennlp/utils/embedders/scalar_mix_transoformer_embedder.py | techthiyanes/luke | 467 | 12711802 | <filename>examples_allennlp/utils/embedders/scalar_mix_transoformer_embedder.py
from allennlp.modules.token_embedders import TokenEmbedder, PretrainedTransformerEmbedder
from allennlp.modules.scalar_mix import ScalarMix
@TokenEmbedder.register("intermediate_pretrained_transformer")
class IntermediatePretrainedTransformerEmbedder(PretrainedTransformerEmbedder):
def __init__(self, layer_index: int, **kwargs) -> None:
super().__init__(**kwargs, last_layer_only=False)
initial_scalar_parameters = [-1e9 for _ in range(self.config.num_hidden_layers)]
initial_scalar_parameters[layer_index] = 0
self._scalar_mix = ScalarMix(
self.config.num_hidden_layers,
initial_scalar_parameters=initial_scalar_parameters,
trainable=False,
do_layer_norm=False,
)
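# Hedged usage sketch (added for illustration, not part of the upstream module):
# giving one intermediate layer all of the (frozen) scalar-mix weight selects
# that layer's hidden states.  The model name below is an illustrative choice
# and is downloaded on first use.
def _example_intermediate_embedder():
    return IntermediatePretrainedTransformerEmbedder(
        layer_index=6, model_name="bert-base-uncased"
    )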
|
src/masonite/providers/WhitenoiseProvider.py | cercos/masonite | 1,816 | 12711822 | from .Provider import Provider
from whitenoise import WhiteNoise
import os
class WhitenoiseProvider(Provider):
def __init__(self, application):
self.application = application
def register(self):
response_handler = WhiteNoise(
self.application.get_response_handler(),
root=self.application.get_storage_path(),
autorefresh=True,
)
for location, alias in (
self.application.make("storage_capsule").get_storage_assets().items()
):
response_handler.add_files(location, prefix=alias)
self.application.set_response_handler(response_handler)
def boot(self):
return
|
caption/encoders/vanilla.py | SikandarBakht/asg2cap | 169 | 12711851 | <gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
import framework.ops
'''
Vanilla Encoder: embed nd array (batch_size, ..., dim_ft)
- EncoderConfig
- Encoder
Multilayer Perceptrons: feed forward networks + softmax
- MLPConfig
- MLP
'''
class EncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.dim_fts = [2048]
self.dim_embed = 512
self.is_embed = True
self.dropout = 0
self.norm = False
self.nonlinear = False
def _assert(self):
if not self.is_embed:
assert self.dim_embed == sum(self.dim_fts)
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if self.config.is_embed:
self.ft_embed = nn.Linear(sum(self.config.dim_fts), self.config.dim_embed)
self.dropout = nn.Dropout(self.config.dropout)
def forward(self, fts):
'''
Args:
fts: size=(batch, ..., sum(dim_fts))
Returns:
embeds: size=(batch, dim_embed)
'''
embeds = fts
if self.config.is_embed:
embeds = self.ft_embed(embeds)
if self.config.nonlinear:
embeds = F.relu(embeds)
if self.config.norm:
embeds = framework.ops.l2norm(embeds)
embeds = self.dropout(embeds)
return embeds
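# Hedged usage sketch (added for illustration, not part of the upstream module):
# embed a small batch of 2048-d features with the default config.  This assumes
# framework.configbase.ModuleConfig needs no constructor arguments, as implied
# by EncoderConfig.__init__ above.
def _example_encoder_forward():
    config = EncoderConfig()                   # dim_fts=[2048], dim_embed=512
    encoder = Encoder(config)
    fts = torch.randn(4, sum(config.dim_fts))  # (batch, sum(dim_fts))
    embeds = encoder(fts)                      # (batch, dim_embed)
    return embeds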
|
setup.py | adamserafini/pyxl | 366 | 12711868 | <gh_stars>100-1000
#!/usr/bin/env python
import distutils.core
import sys
version = "1.0"
distutils.core.setup(
name="pyxl",
version=version,
packages = ["pyxl", "pyxl.codec", "pyxl.scripts", "pyxl.examples"],
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/awable/pyxl",
download_url="http://github.com/downloads/awable/pyxl/pyxl-%s.tar.gz" % version,
license="http://www.apache.org/licenses/LICENSE-2.0",
description="""
Pyxl is an open source package that extends Python to support inline HTML. It converts
HTML fragments into valid Python expressions, and is meant as a replacement for traditional
python templating systems like Mako or Cheetah. It automatically escapes data, enforces
correct markup and makes it easier to write reusable and well structured UI code.
Pyxl was inspired by the XHP project at Facebook.
"""
)
|
numpy/mnist/adam.py | wiseodd/natural-gradients | 104 | 12711905 | import numpy as np
import input_data
from sklearn.utils import shuffle
np.random.seed(9999)
mnist = input_data.read_data_sets('../MNIST_data', one_hot=True)
X_train = mnist.train.images
t_train = mnist.train.labels
X_test = mnist.test.images
t_test = mnist.test.labels
X_train, t_train = shuffle(X_train, t_train)
# Model
W1 = np.random.randn(784, 100) * 0.01
W2 = np.random.randn(100, 10) * 0.01
def softmax(x):
ex = np.exp(x - np.max(x, axis=1)[:, None])
return ex / ex.sum(axis=1)[:, None]
def NLL(z, t):
return -np.mean(np.sum(t*np.log(softmax(z) + eps), axis=1))
m = 200 # mb size
alpha = 0.001
rho1 = 0.9 # Decay for F
rho2 = 0.999 # Momentum
s1 = np.zeros_like(W1)
r1 = np.zeros_like(W1)
s2 = np.zeros_like(W2)
r2 = np.zeros_like(W2)
eps = 1e-8
# Visualization stuffs
losses = []
# Training
for i in range(1, 5000):
X_mb, t_mb = mnist.train.next_batch(m)
t_mb_idx = t_mb.argmax(axis=1)
# Forward
a = X_mb @ W1
h = np.maximum(a, 0)
z = h @ W2
loss = NLL(z, t_mb)
# Loss
if (i-1) % 100 == 0:
print(f'Iter-{i}; Loss: {loss:.3f}')
losses.append(loss if i == 1 else 0.99*losses[-1] + 0.01*loss)
m = z.shape[0]
# Gradients
dz = softmax(z)
dz[range(dz.shape[0]), t_mb_idx] -= 1 # m*10
dz /= m
dW2 = h.T @ dz # 100*10
dh = dz @ W2.T # m*100
dh[a < 0] = 0 # ReLU
dW1 = X_mb.T @ dh # 784*100
# Moments
s1 = rho1*s1 + (1-rho1)*dW1
r1 = rho2*r1 + (1-rho2)*(dW1*dW1)
s2 = rho1*s2 + (1-rho1)*dW2
r2 = rho2*r2 + (1-rho2)*(dW2*dW2)
# r = rho2*r + (1-rho2)*(m*g*g) # Corresponds to diagonal approx. of FIM
# Bias correction
s1_ = s1/(1-rho1**i)
r1_ = r1/(1-rho2**i)
s2_ = s2/(1-rho1**i)
r2_ = r2/(1-rho2**i)
# Step
delta1 = s1_ / (np.sqrt(r1_) + eps)
delta2 = s2_ / (np.sqrt(r2_) + eps)
# delta = s_ / (r_ + eps) # Inverse of diagonal FIM
# W = W - alpha * g # SGD update
W1 = W1 - alpha * delta1
W2 = W2 - alpha * delta2
y = softmax(np.maximum(X_test @ W1, 0) @ W2).argmax(axis=1)
acc = np.mean(y == t_test.argmax(axis=1))
print(f'Accuracy: {acc:.3f}')
np.save('adam_losses.npy', losses)
|
mode/examples/Contributed Libraries in Python/OpenCV/BrightnessContrast/BrightnessContrast.pyde | timgates42/processing.py | 1,224 | 12711920 | add_library('opencv_processing')
img = None
opencv = None
def setup():
    global img, opencv  # rebind the module-level names so draw() can use them
    img = loadImage("test.jpg")
    size(img.width, img.height, P2D)
    opencv = OpenCV(this, img)
def draw():
opencv.loadImage(img)
opencv.brightness(int(map(mouseX, 0, width, -255, 255)))
image(opencv.getOutput(), 0, 0)
|
tools/openmldb_migrate.py | jasleon/OpenMLDB | 2,659 | 12711955 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
import time
USE_SHELL = sys.platform.startswith( "win" )
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--openmldb_bin_path",
dest="openmldb_bin_path",
help="the openmldb bin path")
parser.add_option("--zk_cluster",
dest="zk_cluster",
help="the zookeeper cluster")
parser.add_option("--zk_root_path",
dest="zk_root_path",
help="the zookeeper root path")
parser.add_option("--cmd",
dest="cmd",
help="the cmd for migrate")
parser.add_option("--endpoint",
dest="endpoint",
help="the endpoint for migrate")
parser.add_option("--showtable_path",
dest="showtable_path",
help="the path of showtable result file")
(options, args) = parser.parse_args()
common_cmd = [options.openmldb_bin_path, "--zk_cluster=" + options.zk_cluster, "--zk_root_path=" + options.zk_root_path, "--role=ns_client", "--interactive=false"]
def promot_input(msg,validate_func=None,try_times=1):
while try_times>0:
answer = raw_input(msg).strip()
if validate_func and validate_func(answer):
return answer
try_times-=1
return None
def promot_password_input(msg,validate_func=None,try_times=1):
while try_times>0:
answer = getpass.getpass(msg).strip()
if validate_func and validate_func(answer):
return answer
try_times-=1
return None
def not_none_or_empty(user_input):
    if user_input:
return True
return False
def yes_or_no_validate(user_input):
if user_input and user_input.lower()=='y':
return True
return False
def yes_or_no_promot(msg):
answer = raw_input(msg).strip()
return yes_or_no_validate(answer)
def RunWithRealtimePrint(command,
universal_newlines = True,
useshell = USE_SHELL,
env = os.environ,
print_output = True):
try:
p = subprocess.Popen(command,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
shell = useshell,
env = env )
if print_output:
for line in iter(p.stdout.readline,''):
sys.stdout.write(line)
sys.stdout.write('\r')
p.wait()
return p.returncode
except Exception,ex:
print(ex)
return -1
def RunWithRetuncode(command,
universal_newlines = True,
useshell = USE_SHELL,
env = os.environ):
try:
p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = useshell, universal_newlines = universal_newlines, env = env )
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
p.stdout.close()
p.stderr.close()
return p.returncode,output,errout
except Exception,ex:
print(ex)
return -1,None,None
def GetTables(output):
# name tid pid endpoint role ttl is_alive compress_type
lines = output.split("\n")
content_is_started = False
partition_on_tablet = {}
for line in lines:
if line.startswith("---------"):
content_is_started = True
continue
if not content_is_started:
continue
partition = line.split()
if len(partition) < 4:
continue
partitons = partition_on_tablet.get(partition[3], [])
partitons.append(partition)
partition_on_tablet[partition[3]] = partitons
return partition_on_tablet
def GetTablesStatus(output):
# tid pid offset mode state enable_expire ttl ttl_offset memused compress_type skiplist_height
lines = output.split("\n")
content_is_started = False
partition_on_tablet = {}
for line in lines:
if line.startswith("---------"):
content_is_started = True
continue
if not content_is_started:
continue
partition = line.split()
if len(partition) < 4:
continue
key = "{}_{}".format(partition[0], partition[1])
partition_on_tablet[key] = partition
return partition_on_tablet
def Analysis():
# show table
show_table = [options.openmldb_bin_path, "--zk_cluster=" + options.zk_cluster, "--zk_root_path=" + options.zk_root_path,
"--role=ns_client", "--interactive=false", "--cmd=showtable"]
code, stdout,stderr = RunWithRetuncode(show_table)
if code != 0:
print "fail to show table"
return
partitions = GetTables(stdout)
leader_partitions = []
for p in partitions[options.endpoint]:
if p[4] == "leader":
leader_partitions.append(p)
if not leader_partitions:
print "you can restart the tablet directly"
return
print "the following cmd in ns should be executed for migrating the node"
for p in leader_partitions:
print ">changeleader %s %s auto"%(p[0], p[2])
print "the current leader and follower offset"
GetLeaderFollowerOffset(p[3], p[1], p[2])
print "use the following cmd in tablet to make sure the changeleader is done"
    print ">gettablestatus"
def GetLeaderFollowerOffset(endpoint, tid, pid):
command = [options.openmldb_bin_path, "--endpoint=%s"%endpoint, "--role=client", "--interactive=false", "--cmd=getfollower %s %s"%(tid, pid)]
code, stdout,stderr = RunWithRetuncode(command)
if code != 0:
print "fail to getfollower"
return
print stdout
def ChangeLeader():
# show tablet
show_tablet = list(common_cmd)
show_tablet.append("--cmd=showtablet")
_,stdout,_ = RunWithRetuncode(show_tablet)
print stdout
# show table
show_table = list(common_cmd)
show_table.append("--cmd=showtable")
code, stdout,stderr = RunWithRetuncode(show_table)
if code != 0:
print "fail to show table"
return
partitions = GetTables(stdout)
leader_partitions = []
for p in partitions[options.endpoint]:
if p[4] == "leader":
leader_partitions.append(p)
if not leader_partitions:
print "you can restart the tablet directly"
return
print "start to change leader on %s"%options.endpoint
for p in leader_partitions:
print "the current leader and follower offset"
GetLeaderFollowerOffset(p[3], p[1], p[2])
changeleader = list(common_cmd)
changeleader.append("--cmd=changeleader %s %s auto"%(p[0], p[2]))
        msg = "command:%s \nwill be executed, sure to change leader(y/n):"%(" ".join(changeleader))
yes = yes_or_no_promot(msg)
if yes:
code, stdout, stderr = RunWithRetuncode(changeleader)
if code != 0:
print "fail to change leader for %s %s"%(p[0], p[2])
print stdout
print stderr
else:
print stdout
else:
print "skip to change leader for %s %s"%(p[0], p[2])
def RecoverEndpoint():
# show table
show_table = list(common_cmd)
show_table.append("--cmd=showtable")
code, stdout,stderr = RunWithRetuncode(show_table)
if code != 0:
print "fail to show table"
return
partitions = GetTables(stdout)
not_alive_partitions = []
for p in partitions[options.endpoint]:
# follower status no
# leader status no
if p[6] == "no":
not_alive_partitions.append(p)
if not not_alive_partitions:
        print "no need to recover not alive partition"
return
    print "start to recover partition on %s"%options.endpoint
for p in not_alive_partitions:
        print "not alive partition information"
print " ".join(p)
recover_cmd = list(common_cmd)
recover_cmd.append("--cmd=recovertable %s %s %s"%(p[0], p[2], options.endpoint))
        msg = "command:%s \nwill be executed, sure to recover endpoint(y/n):"%(" ".join(recover_cmd))
yes = yes_or_no_promot(msg)
if yes:
code, stdout, stderr = RunWithRetuncode(recover_cmd)
if code != 0:
                print "fail to recover partition for %s %s on %s"%(p[0], p[2], options.endpoint)
print stdout
print stderr
else:
print stdout
else:
            print "skip to recover partition for %s %s on %s"%(p[0], p[2], options.endpoint)
def RecoverData():
# show table
show_table = list(common_cmd)
show_table.append("--cmd=showtable")
code, stdout,stderr = RunWithRetuncode(show_table)
if code != 0:
print "fail to show table"
return
# check whether table partition is not exixted
partitions = GetTables(stdout)
# print partitions
tablet_cmd = [options.openmldb_bin_path, "--role=client", "--interactive=false"]
for endpoint in partitions:
cmd_gettablestatus = "--cmd=gettablestatus"
gettablestatus = list(tablet_cmd)
gettablestatus.append("--endpoint=" + endpoint)
gettablestatus.append(cmd_gettablestatus)
code, stdout,stderr = RunWithRetuncode(gettablestatus)
table_status = GetTablesStatus(stdout)
if len(table_status) == 0:
continue
else:
print "endpoint {} has table partitions".format(endpoint)
return
conget_auto = list(common_cmd)
conget_auto.append("--cmd=confget auto_failover")
code, stdout,stderr = RunWithRetuncode(conget_auto)
auto_failover_flag = stdout.find("true")
if auto_failover_flag != -1:
# set auto failove is no
confset_no = list(common_cmd)
confset_no.append("--cmd=confset auto_failover false")
code, stdout,stderr = RunWithRetuncode(confset_no)
# print stdout
if code != 0:
print "set auto_failover is failed"
return
print "confset auto_failover false"
# updatetablealive $TABLE 1 172.27.128.37:9797 yes
# ./build/bin/openmldb --cmd="updatetablealive $TABLE 1 172.27.128.37:9797 yes" --role=ns_client --endpoint=172.27.128.37:6527 --interactive=false
# updatetablealive all of tables no
leader_table = {}
follower_table = []
for key in partitions:
tables = partitions[key]
for p in tables:
cmd_no = "--cmd=updatetablealive " + p[0] + " " + p[2] + " " + p[3] + " no"
update_alive_no = list(common_cmd)
update_alive_no.append(cmd_no)
code, stdout,stderr = RunWithRetuncode(update_alive_no)
if stdout.find("update ok") == -1:
print stdout
print "update table alive is failed"
return
            # don't use the return code to determine the result
if p[4] == "leader":
key = "{}_{}".format(p[1], p[2])
if leader_table.has_key(key):
tmp = leader_table[key]
if (tmp[8] < p[8]):
leader_table[key] = p
follower_table.append(tmp)
else:
follower_table.append(p)
else:
leader_table[key] = p
else:
follower_table.append(p)
print "updatetablealive tid[{}] pid[{}] endpoint[{}] no".format(p[1], p[2], p[3])
# ./build/bin/openmldb --cmd="loadtable $TABLE $TID $PID 144000 3 true" --role=client --endpoint=$TABLET_ENDPOINT --interactive=false
for key in leader_table:
# get table info
table = leader_table[key]
print "table leader: {}".format(table)
cmd_info = list(common_cmd)
cmd_info.append("--cmd=info " + table[0])
while True:
code, stdout,stderr = RunWithRetuncode(cmd_info)
if code != 0:
print "fail to get table info"
return
lines = stdout.split('\n')
if len(lines) >= 12:
storage_mode = lines[11].split()[1]
break
else:
print "get info connect error, retry in 1 second"
time.sleep(1)
# print key
cmd_loadtable = "--cmd=loadtable " + table[0] + " " + table[1] + " " + table[2] + " " + table[5].split("min")[0] + " 8" + " true " + storage_mode
# print cmd_loadtable
loadtable = list(tablet_cmd)
loadtable.append(cmd_loadtable)
loadtable.append("--endpoint=" + table[3])
# print loadtable
code, stdout,stderr = RunWithRetuncode(loadtable)
if stdout.find("LoadTable ok") == -1:
print stdout
print "load table is failed"
return
print "loadtable tid[{}] pid[{}]".format(table[1], table[2])
# check table status
count = 0
time.sleep(3)
while True:
flag = True
if count % 12 == 0:
print "loop check NO.{}".format(count)
for key in leader_table:
table = leader_table[key]
cmd_gettablestatus = "--cmd=gettablestatus"
gettablestatus = list(tablet_cmd)
gettablestatus.append("--endpoint=" + table[3])
gettablestatus.append(cmd_gettablestatus)
while True:
code, stdout,stderr = RunWithRetuncode(gettablestatus)
table_status = GetTablesStatus(stdout)
if table_status.has_key(key):
status = table_status[key]
break
else:
print "gettablestatus error, retry in 2 seconds"
time.sleep(2)
if status[3] == "kTableLeader":
if count % 12 == 0:
print "{} status: {}".format(key, status[4])
if status[4] != "kTableNormal":
flag = False
else:
# update table is alive
cmd_yes = "--cmd=updatetablealive " + table[0] + " " + table[2] + " " + table[3] + " yes"
update_alive_yes = list(common_cmd)
update_alive_yes.append(cmd_yes)
code, stdout,stderr = RunWithRetuncode(update_alive_yes)
if stdout.find("update ok") == -1:
print stdout
print "update table alive is failed"
return
break
if flag == True:
print "Load table is ok"
break
if count % 12 == 0:
print "loading table, please wait a moment"
count = count + 1
time.sleep(5)
# recovertable table_name pid endpoint
for table in follower_table:
# print table
cmd_recovertable = "--cmd=recovertable " + table[0] + " " + table[2] + " " + table[3]
recovertable = list(common_cmd)
recovertable.append(cmd_recovertable)
code, stdout,stderr = RunWithRetuncode(recovertable)
if stdout.find("recover table ok") == -1:
print stdout
print "recover is failed"
return
print "recovertable tid[{}] pid[{}] endpoint[{}]".format(table[1], table[2], table[3])
# print stdout
if auto_failover_flag != -1:
# set auto failove is no
confset_no = list(common_cmd)
confset_no.append("--cmd=confset auto_failover true")
code, stdout,stderr = RunWithRetuncode(confset_no)
# print stdout
if code != 0:
print "set auto_failover true is failed"
return
print "confset auto_failover true"
def PrintLog(log_cmd, ret_code, ret_stdout, ret_stderr):
print log_cmd
if ret_code != 0:
print ret_stdout
print ret_stderr
raise Exception, "FAIL !!!"
else:
print ret_stdout
def GetTablesDic(output):
lines = output.split("\n")
content_is_started = False
partition_on_tablet = {}
for line in lines:
if line.startswith("---------"):
content_is_started = True
continue
if not content_is_started:
continue
partition = line.split()
if len(partition) < 4:
continue
partitions = partition_on_tablet.get(partition[2], {})
partitions[partition[3]] = partition
partition_on_tablet[partition[2]] = partitions
return partition_on_tablet
def BalanceLeader():
auto_failover_flag = -1
try:
        # get auto_failover config
conget_auto = list(common_cmd)
conget_auto.append("--cmd=confget auto_failover")
code, stdout,stderr = RunWithRetuncode(conget_auto)
auto_failover_flag = stdout.find("true")
if auto_failover_flag != -1:
# set auto failove is no
confset_no = list(common_cmd)
confset_no.append("--cmd=confset auto_failover false")
code, stdout,stderr = RunWithRetuncode(confset_no)
# print stdout
PrintLog("set auto_failover false", code, stdout, stderr)
# get table info from file
with open(options.showtable_path, "r") as f:
tables = f.read()
partitions = GetTables(tables)
ori_leader_partitions = []
for endpoint in partitions:
for p in partitions[endpoint]:
if p[4] == "leader" and p[6] == "yes":
ori_leader_partitions.append(p)
if not ori_leader_partitions:
print "no leader"
return
# get current table info
show_table = list(common_cmd)
show_table.append("--cmd=showtable")
code, stdout,stderr = RunWithRetuncode(show_table)
PrintLog("showtable", code, stdout, stderr)
partitions = GetTablesDic(stdout)
time.sleep(1)
not_alive_partitions = []
for pid in partitions.keys():
for endpoint in partitions[pid]:
if partitions[pid][endpoint][6] == "no":
not_alive_partitions.append(partitions[pid][endpoint])
for p in not_alive_partitions:
recover_cmd = list(common_cmd)
recover_cmd.append("--cmd=recovertable %s %s %s"%(p[0], p[2], p[3]))
code, stdout, stderr = RunWithRetuncode(recover_cmd)
PrintLog("recovertable %s %s %s"%(p[0], p[2], p[3]), code, stdout, stderr)
time.sleep(1)
# balance leader
print "start to balance leader"
for p in ori_leader_partitions:
if partitions[p[2]][p[3]][4]=="leader" and partitions[p[2]][p[3]][6]=="yes":
continue
changeleader = list(common_cmd)
changeleader.append("--cmd=changeleader %s %s %s"%(p[0], p[2], p[3]))
code, stdout, stderr = RunWithRetuncode(changeleader)
PrintLog("changeleader %s %s %s"%(p[0], p[2], p[3]), code, stdout, stderr)
time.sleep(1)
# find not_alive_partition
show_table = list(common_cmd)
show_table.append("--cmd=showtable")
code, stdout,stderr = RunWithRetuncode(show_table)
partitions = GetTables(stdout)
not_alive_partitions = []
for endpoint in partitions.keys():
for p in partitions[endpoint]:
if p[6] == "no":
not_alive_partitions.append(p)
for p in not_alive_partitions:
print "not alive partition information"
print " ".join(p)
recover_cmd = list(common_cmd)
recover_cmd.append("--cmd=recovertable %s %s %s"%(p[0], p[2], p[3]))
code, stdout, stderr = RunWithRetuncode(recover_cmd)
if code != 0:
print "fail to recover partiton for %s %s on %s"%(p[0], p[2], p[3])
print stdout
print stderr
else:
print stdout
print "balance leader success!"
except Exception,ex:
print "balance leader fail!"
return -1
finally:
if auto_failover_flag != -1:
# recover auto failover
confset_no = list(common_cmd)
confset_no.append("--cmd=confset auto_failover true")
print "confset auto_failover true"
code, stdout,stderr = RunWithRetuncode(confset_no)
if code != 0:
print "set auto_failover failed"
def Main():
if options.cmd == "analysis":
Analysis()
elif options.cmd == "changeleader":
ChangeLeader()
elif options.cmd == "recovertable":
RecoverEndpoint()
elif options.cmd == "recoverdata":
RecoverData()
elif options.cmd == "balanceleader":
BalanceLeader()
if __name__ == "__main__":
Main()
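# Hedged usage note (added for illustration, not part of the upstream script):
# a typical invocation looks like the following; the endpoints and paths are
# placeholders that must be replaced with real cluster values, and --cmd is one
# of analysis/changeleader/recovertable/recoverdata/balanceleader as handled in
# Main() above.
#
#   python openmldb_migrate.py \
#       --openmldb_bin_path=./bin/openmldb \
#       --zk_cluster=127.0.0.1:2181 \
#       --zk_root_path=/openmldb \
#       --cmd=changeleader \
#       --endpoint=127.0.0.1:9527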
|
.modules/.recon-ng/modules/recon/locations-locations/reverse_geocode.py | termux-one/EasY_HaCk | 1,103 | 12711957 | from recon.core.module import BaseModule
class Module(BaseModule):
meta = {
'name': 'Reverse Geocoder',
'author': '<NAME> (<EMAIL>)',
'description': 'Queries the Google Maps API to obtain an address from coordinates.',
'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL',
}
def module_run(self, points):
for point in points:
self.verbose("Reverse geocoding (%s)..." % (point))
payload = {'latlng' : point, 'sensor' : 'false'}
url = 'https://maps.googleapis.com/maps/api/geocode/json'
resp = self.request(url, payload=payload)
# kill the module if nothing is returned
if len(resp.json['results']) == 0:
self.output('Unable to resolve an address for (%s).' % (point))
return
# loop through the results
found = False
for result in resp.json['results']:
if result['geometry']['location_type'] == 'ROOFTOP':
found = True
lat = point.split(',')[0]
lon = point.split(',')[1]
address = result['formatted_address']
# store the result
self.add_locations(lat, lon, address)
if found: self.query('DELETE FROM locations WHERE latitude=? AND longitude=? AND street_address IS NULL', (lat, lon))
|
server/apps/recommendation/apps.py | Mayandev/django_morec | 129 | 12711988 | from django.apps import AppConfig
class RecommendationConfig(AppConfig):
name = 'recommendation'
# app名字后台显示中文
verbose_name = "推荐管理"
|
pybo/policies/__init__.py | hfukada/pybo | 115 | 12711993 | """
Acquisition functions.
"""
# pylint: disable=wildcard-import
from .simple import *
from . import simple
__all__ = []
__all__ += simple.__all__
|
shim/opentelemetry-opentracing-shim/tests/testbed/test_subtask_span_propagation/test_asyncio.py | oxeye-nikolay/opentelemetry-python | 868 | 12712008 | from __future__ import absolute_import, print_function
import asyncio
from ..otel_ot_shim_tracer import MockTracer
from ..testcase import OpenTelemetryTestCase
class TestAsyncio(OpenTelemetryTestCase):
def setUp(self):
self.tracer = MockTracer()
self.loop = asyncio.get_event_loop()
def test_main(self):
res = self.loop.run_until_complete(self.parent_task("message"))
self.assertEqual(res, "message::response")
spans = self.tracer.finished_spans()
self.assertEqual(len(spans), 2)
self.assertNamesEqual(spans, ["child", "parent"])
self.assertIsChildOf(spans[0], spans[1])
async def parent_task(self, message): # noqa
with self.tracer.start_active_span("parent"):
res = await self.child_task(message)
return res
async def child_task(self, message):
# No need to pass/activate the parent Span, as it stays in the context.
with self.tracer.start_active_span("child"):
return f"{message}::response"
|
chapter11/observer.py | JoeanAmiee/Mastering-Python-Design-Patterns-Second-Edition | 278 | 12712032 | <gh_stars>100-1000
class Publisher:
def __init__(self):
self.observers = []
def add(self, observer):
if observer not in self.observers:
self.observers.append(observer)
else:
print(f'Failed to add: {observer}')
def remove(self, observer):
try:
self.observers.remove(observer)
except ValueError:
print(f'Failed to remove: {observer}')
def notify(self):
[o.notify(self) for o in self.observers]
class DefaultFormatter(Publisher):
def __init__(self, name):
Publisher.__init__(self)
self.name = name
self._data = 0
def __str__(self):
return f"{type(self).__name__}: '{self.name}' has data = {self._data}"
@property
def data(self):
return self._data
@data.setter
def data(self, new_value):
try:
self._data = int(new_value)
except ValueError as e:
print(f'Error: {e}')
else:
self.notify()
class HexFormatterObs:
def notify(self, publisher):
value = hex(publisher.data)
print(f"{type(self).__name__}: '{publisher.name}' has now hex data = {value}")
class BinaryFormatterObs:
def notify(self, publisher):
value = bin(publisher.data)
print(f"{type(self).__name__}: '{publisher.name}' has now bin data = {value}")
def main():
df = DefaultFormatter('test1')
print(df)
print()
hf = HexFormatterObs()
df.add(hf)
df.data = 3
print(df)
print()
bf = BinaryFormatterObs()
df.add(bf)
df.data = 21
print(df)
print()
df.remove(hf)
df.data = 40
print(df)
print()
df.remove(hf)
df.add(bf)
df.data = 'hello'
print(df)
print()
df.data = 15.8
print(df)
if __name__ == '__main__':
main()
|
env/Lib/site-packages/OpenGL/GLES1/OES/required_internalformat.py | 5gconnectedbike/Navio2 | 210 | 12712034 | '''OpenGL extension OES.required_internalformat
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.required_internalformat to provide a more
Python-friendly API
Overview (from the spec)
The ES 1.1 API allows an implementation to store texture data internally
with arbitrary precision, regardless of the format and type of the data
supplied by the application. Similarly, ES allows an implementation to
choose an arbitrary precision for the internal storage of image data
allocated by glRenderbufferStorageOES.
While this allows flexibility for implementations, it does mean that an
application does not have a reliable means to request the implementation
maintain a specific precision or to find out what precision the
implementation will maintain for a given texture or renderbuffer image.
For reference, "Desktop" OpenGL uses the <internalformat> argument to
glTexImage*, glCopyTexImage* and glRenderbufferStorageEXT as a hint,
defining the particular base format and precision that the application wants
the implementation to maintain when storing the image data. Further, the
application can choose an <internalformat> with a different base internal
format than the source format specified by <format>. The implementation is
not required to exactly match the precision specified by <internalformat>
when choosing an internal storage precision, but it is required to match the
base internal format of <internalformat>.
In addition, ES 1.1 does not allow an implementation to fail a request to
glTexImage2D for any of the legal <format> and <type> combinations listed in
Table 3.4, even if the implementation does not natively support data stored
in that external <format> and <type>. However, there are no additional
requirements placed on the implementation. The ES implementation is free to
store the texture data with lower precision than originally specified, for
instance. Further, since ES removes the ability to query the texture object
to find out what internal format it chose, there is no way for the
application to find out that this has happened.
This extension addresses the situation in two ways:
1) This extension introduces the ability for an application to specify
the desired "sized" internal formats for texture image allocation.
2) This extension guarantees to maintain at least the specified
precision of all available sized internal formats.
An implementation that exports this extension is committing to support all
of the legal values for <internalformat> in Tables 3.4, 3.4.x, and 3.4.y,
subject to the extension dependencies described herein. That is to say, the
implementation is guaranteeing that choosing an <internalformat> argument
with a value from these tables will not cause an image allocation request to
fail. Furthermore, it is guaranteeing that for any sized internal format,
the renderbuffer or texture data will be stored with at least the precision
prescribed by the sized internal format.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/required_internalformat.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.required_internalformat import *
from OpenGL.raw.GLES1.OES.required_internalformat import _EXTENSION_NAME
def glInitRequiredInternalformatOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
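# Hedged usage sketch (added for illustration, not part of the generated code):
# once a GLES 1.x context has been created by some windowing toolkit, the check
# above can gate reliance on sized internal formats.
def _example_check_required_internalformat():
    if glInitRequiredInternalformatOES():
        # Sized internal formats guaranteed by this extension may be requested
        # in glTexImage2D / glRenderbufferStorageOES calls.
        return True
    return False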
### END AUTOGENERATED SECTION |
xfel/lcls_api/exercise_api.py | dperl-sol/cctbx_project | 155 | 12712100 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import psana
from xfel.lcls_api.psana_cctbx import CctbxPsanaEventProcessor
def simple_example(experiment, run_number, detector_address, params_file, event_num):
""" Demo using the cctbx/lcls api
@param experiment LCLS experiment string
@param run_number Run number
@param params_file cctbx/DIALS parameter file for processing
@param event_num Index for specific event to process
"""
output_tag = '%s_run%d'%(experiment, run_number)
print("Getting datasource")
ds = psana.DataSource('exp=%s:run=%d'%(experiment, run_number))
processor = CctbxPsanaEventProcessor(params_file, output_tag, logfile = output_tag + ".log")
for run in ds.runs():
print("Getting detector")
det = psana.Detector(detector_address)
processor.setup_run(run, det)
for event_id, event in enumerate(ds.events()):
print(event_id)
if event_num is not None and event_id != event_num: continue
processor.process_event(event, str(event_id))
break
break
processor.finalize()
def full_api_example(experiment, run_number, detector_address, params_file, event_num):
""" Demo using the cctbx/lcls api
@param experiment LCLS experiment string
@param run_number Run number
@param params_file cctbx/DIALS parameter file for processing
@param event_num Index for specific event to process
"""
output_tag = '%s_run%d'%(experiment, run_number)
print("Getting datasource")
ds = psana.DataSource('exp=%s:run=%d'%(experiment, run_number))
processor = CctbxPsanaEventProcessor(params_file, output_tag) # note, logfile already initialized in this demo, so don't do it twice
for run in ds.runs():
print("Getting detector")
det = psana.Detector(detector_address)
processor.setup_run(run, det)
for event_id, event in enumerate(ds.events()):
print(event_id)
if event_num is not None and event_id != event_num: continue
tag = '%s_%s'%(output_tag, str(event_id))
experiments = processor.experiments_from_event(event)
processor.tag = tag
processor.setup_filenames(tag)
try:
processor.pre_process(experiments)
observed = processor.find_spots(experiments)
experiments, indexed = processor.index(experiments, observed)
experiments, indexed = processor.refine(experiments, indexed)
integrated = processor.integrate(experiments, indexed)
print("Integrated %d spots on %d lattices"%(len(integrated), len(experiments)))
except Exception as e:
print("Couldn't process event %d"%event_id, str(e))
break
break
processor.finalize()
if __name__ == '__main__':
import sys
experiment, run_number, detector_address, params_file, event_num = sys.argv[1:6]
simple_example(experiment, int(run_number), detector_address, params_file, int(event_num))
full_api_example(experiment, int(run_number), detector_address, params_file, int(event_num))
|
care/facility/migrations/0254_patientnotes.py | gigincg/care | 189 | 12712121 | # Generated by Django 2.2.11 on 2021-06-12 14:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('facility', '0253_auto_20210612_1256'),
]
operations = [
migrations.CreateModel(
name='PatientNotes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, unique=True)),
('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, db_index=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('note', models.TextField(blank=True, default='')),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('facility', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='facility.Facility')),
('patient', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='facility.PatientRegistration')),
],
options={
'abstract': False,
},
),
]
|
alg/compartmental_gp/data_loader.py | loramf/mlforhealthlabpub | 171 | 12712125 | <gh_stars>100-1000
import pandas as pds
import numpy as np
from datetime import datetime
import torch
def numpy_fill(arr):
mask = np.isnan(arr)
idx = np.where(~mask,np.arange(mask.shape[1]),0)
np.maximum.accumulate(idx,axis=1, out=idx)
out = arr[np.arange(idx.shape[0])[:,None], idx]
return out
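# Hedged illustration (added, not part of the upstream module): numpy_fill
# carries the last non-NaN value forward along each row; a leading NaN has
# nothing to copy and is left as-is (callers below zero it out afterwards).
def _example_numpy_fill():
    arr = np.array([[np.nan, 1.0, np.nan, np.nan, 4.0]])
    return numpy_fill(arr)  # -> [[nan, 1.0, 1.0, 1.0, 4.0]]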
def get_intervention(country, standarize=False, smooth=True, legacy=False):
csvs = [
'c1_schoolclosing.csv',
'c2_workplaceclosing.csv',
'c3_cancelpublicevents.csv',
'c4_restrictionsongatherings.csv',
'c5_closepublictransport.csv',
'c6_stayathomerequirements.csv',
'c7_domestictravel.csv',
'c8_internationaltravel.csv',
'e1_incomesupport.csv',
'e2_debtcontractrelief.csv',
'h1_publicinfocampaign.csv',
'h2_testingpolicy.csv'
] + ['c{}_flag.csv'.format(x) for x in range(1, 8)] + ['e1_flag.csv', 'h1_flag.csv']
if not legacy:
files = ['ox-policy-tracker/data/timeseries/{}'.format(i) for i in csvs]
else:
files = ['covid-policy-tracker-legacy/data/timeseries/{}'.format(i) for i in csvs]
idx_list = []
for f in files:
dat_ox = pds.read_csv(f)
dat_ox.rename(columns={'Unnamed: 0': 'country', 'Unnamed: 1': 'country_code'}, inplace=True)
dat_ox[dat_ox == '.'] = 'NaN'
dt_list = [datetime.strptime(x, '%d%b%Y').date() for x in dat_ox.columns[2:]]
dat_country = dat_ox[dat_ox['country'] == country]
index_country = dat_country.iloc[0, 2:].values.astype(np.float)
# fill na with previous value
index_country = numpy_fill(index_country[None, :])
# handle the case of initial zeros
index_country[np.isnan(index_country)] = 0
idx_list.append(index_country[0, :])
idx = np.stack(idx_list, -1)
if standarize:
idx = (idx - np.mean(idx, axis=0)) / np.std(idx, axis=0)
idx[np.isnan(idx)] = 0
if smooth:
dy_list = list()
for i in range(idx.shape[1]):
ds = idx[:, i]
dy = smooth_curve_1d(ds)
dy_list.append(dy)
idx = np.stack(dy_list, axis=-1)
return idx
def smooth_curve_1d(x):
w = np.ones(7, 'd')
y = np.convolve(w / w.sum(), x, mode='valid')
y = np.concatenate([np.zeros(3), y])
return y
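# Hedged illustration (added, not part of the upstream module): the smoother is
# a 7-point moving average ('valid' convolution) left-padded with three zeros,
# so the output is 3 samples shorter than the input.
def _example_smooth_curve_1d():
    x = np.arange(14, dtype=float)
    y = smooth_curve_1d(x)  # y[3] == x[0:7].mean() == 3.0, len(y) == 11
    return y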
def get_deaths(country, to_torch=False, legacy=False, smart_start=True, pad=0, rebuttal=False):
# get time series
if not legacy:
file = 'ts-data/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
else:
file = 'COVID-19-legacy/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
if rebuttal:
file = 'COVID-19-rebuttal-08-10/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
dat = pds.read_csv(file)
dt_list = [datetime.strptime(x, '%m/%d/%y').date() for x in dat.columns[4:]]
if country not in ['China', 'Canada']:
country_data = dat[(dat['Country/Region'] == country) & (dat['Province/State'].isnull())].iloc[0, 4:].values
else:
country_data = np.sum(dat[(dat['Country/Region'] == country)].iloc[:, 4:].values, axis=0)
ind = (country_data != 0).argmax() - pad
if ind < 0:
print(country)
ind = 0
# assert ind >= 0
cum_deaths = country_data[ind:].astype(np.float64)
dt_list = dt_list[ind:]
daily_deaths = np.diff(np.append(np.zeros(1), cum_deaths))
if country == 'Philippines':
cum_deaths = cum_deaths[39:]
dt_list = dt_list[39:]
daily_deaths = daily_deaths[39:]
if country == 'France':
cum_deaths = cum_deaths[17:]
dt_list = dt_list[17:]
daily_deaths = daily_deaths[17:]
# get population
dat_feat = pds.read_csv('country_feature/country_feats.csv')
if country == 'US':
p_country = 'United States'
elif country == 'Korea, South':
p_country = 'Korea, Rep.'
elif country == 'Iran':
p_country = 'Iran, Islamic Rep.'
elif country == 'Russia':
p_country = 'Russian Federation'
elif country == 'Egypt':
p_country = 'Egypt, Arab Rep.'
else:
p_country = country
population = dat_feat[(dat_feat['Country.Name'] == p_country) & (dat_feat['metric'] == 'Population, total')]
population = population['value'].values[0]
# define the starting point
if smart_start:
rate = 3.061029261722505e-08
daily_death_min = rate * population
ind_death = ((daily_deaths >= daily_death_min) * .1).argmax()
cum_deaths = cum_deaths[ind_death:]
dt_list = dt_list[ind_death:]
daily_deaths = daily_deaths[ind_death:]
# get oxford index
if not legacy:
dat_ox = pds.read_csv('ox-policy-tracker/data/timeseries/stringencyindex_legacy.csv')
else:
dat_ox = pds.read_csv('covid-policy-tracker-legacy/data/timeseries/stringencyindex_legacy.csv')
dat_ox.rename(columns={'Unnamed: 0': 'country', 'Unnamed: 1': 'country_code'}, inplace=True)
dt_list_ind = [datetime.strptime(x, '%d%b%Y').date() for x in dat_ox.columns[2:]]
dat_ox[dat_ox == '.'] = 'NaN'
if country == 'US':
o_country = 'United States'
elif country == 'Korea, South':
o_country = 'South Korea'
else:
o_country = country
dat_country = dat_ox[dat_ox['country'] == o_country]
# 7d mv smooth
    index_country = dat_country.iloc[0, 2:].values.astype(np.float64)
ind_len = len(index_country)
index_country = smooth_curve_1d(index_country)[:ind_len]
index_country[np.isnan(index_country)] = np.nanmean(index_country)
intervention = get_intervention(o_country, legacy)
if not to_torch:
return {
'dt': dt_list,
'cum_death': cum_deaths,
'daily_death': daily_deaths,
'population': population,
's_index_dt': dt_list_ind,
's_index': index_country,
'intervention': intervention
}
else:
return {
'dt': dt_list,
'cum_death': torch.tensor(cum_deaths),
'daily_death': torch.tensor(daily_deaths),
'population': population,
's_index_dt': dt_list_ind,
's_index': torch.tensor(index_country),
'intervention': torch.tensor(intervention)
}
def pad_sequence_trailing(sequences, padding_value=0):
# assuming trailing dimensions and type of all the Tensors
# in sequences are same and fetching those from sequences[0]
max_size = sequences[0].size()
trailing_dims = max_size[1:]
max_len = max([s.size(0) for s in sequences])
out_dims = (max_len, len(sequences)) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[-length:, i, ...] = tensor
return out_tensor
def cut_s_index(data_dict):
ind = data_dict['s_index_dt'].index(data_dict['dt'][0])
s_len = len(data_dict['cum_death'])
s_index = data_dict['s_index'][ind:ind + s_len]
intervention = data_dict['intervention'][ind:ind + s_len]
return s_index, intervention
def get_data_pyro(countries, legacy=False, smart_start=True, pad=0, rebuttal=False):
data_list = [get_deaths(x, True, legacy, smart_start, pad, rebuttal) for x in countries]
init_days = [x['dt'][0] for x in data_list]
init_day = min(init_days)
t_first_blood = [(x - init_day).days for x in init_days]
cum_death = pad_sequence_trailing([x['cum_death'] for x in data_list])
daily_death = pad_sequence_trailing([x['daily_death'] for x in data_list])
si_cut = [cut_s_index(x) for x in data_list]
s_index = pad_sequence_trailing([x[0] for x in si_cut]) / 100
i_index = pad_sequence_trailing([x[1] for x in si_cut])
N_list = [x['population'] for x in data_list]
date_list = pds.date_range(init_day, periods=cum_death.size(0))
country_feat = get_country_feature(countries)
feat_list = [
'Mortality from CVD, cancer, diabetes or CRD between exact ages 30 and 70 (%)',
'Mortality rate, adult, male (per 1,000 male adults)',
'Mortality rate attributed to household and ambient air pollution, age-standardized (per 100,000 population)',
'Incidence of tuberculosis (per 100,000 people)',
'Immunization, measles (% of children ages 12-23 months)',
'Immunization, DPT (% of children ages 12-23 months)',
'Immunization, HepB3 (% of one-year-old children)',
'Cause of death, by communicable diseases and maternal, prenatal and nutrition conditions (% of total)',
'Prevalence of overweight (% of adults)'
]
country_feat = country_feat[country_feat.metric.isin(feat_list)]
    dat_feat = country_feat.pivot(index='country', columns='metric', values='value')
feat = np.zeros_like(dat_feat.values)
for i in range(len(countries)):
feat[i] = dat_feat.loc[countries[i]].values
feat = (feat - np.nanmean(feat, axis=0)) / np.nanstd(feat, axis=0)
feat[np.isnan(feat)] = 0.
return {
'cum_death': cum_death,
'daily_death': daily_death,
's_index': s_index,
'i_index': i_index,
'population': N_list,
't_init': torch.tensor(t_first_blood).unsqueeze(-1),
'date_list': date_list,
'countries': countries,
'country_feat': torch.tensor(feat).to(i_index)
}
def get_country_feature(country_list):
dat_feat = pds.read_csv('country_feature/country_feats.csv')
p_country_list = []
for country in country_list:
if country == 'US':
p_country = 'United States'
elif country == 'Korea, South':
p_country = 'Korea, Rep.'
elif country == 'Iran':
p_country = 'Iran, Islamic Rep.'
elif country == 'Russia':
p_country = 'Russian Federation'
elif country == 'Egypt':
p_country = 'Egypt, Arab Rep.'
else:
p_country = country
p_country_list.append(p_country)
dat_feat = dat_feat[(dat_feat['Country.Name'].isin(p_country_list))]
del dat_feat['Country.Code']
dat_feat['country'] = dat_feat['Country.Name']
del dat_feat['Country.Name']
countries = dat_feat['country'].values
countries[countries == 'United States'] = 'US'
countries[countries == 'Korea, Rep.'] = 'Korea, South'
countries[countries == 'Iran, Islamic Rep.'] = 'Iran'
countries[countries == 'Russian Federation'] = 'Russia'
countries[countries == 'Egypt, Arab Rep.'] = 'Egypt'
dat_feat['country'] = list(countries)
return dat_feat
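# Hedged usage sketch (not part of the original script): it assumes the local CSV
# directories referenced above ('ts-data', 'ox-policy-tracker', 'country_feature')
# are available, so it is kept behind a __main__ guard.
if __name__ == '__main__':
    demo = get_data_pyro(['Italy', 'Spain', 'US'], legacy=False)
    # cum_death / daily_death / s_index are (time, num_countries) tensors
    print(demo['cum_death'].shape, demo['s_index'].shape, len(demo['population']))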
|
tensorflow_graphics/projects/gan/exponential_moving_average_test.py | sarvex/graphics | 2,759 | 12712142 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gan.exponential_moving_average."""
import tensorflow as tf
from tensorflow_graphics.projects.gan import exponential_moving_average
class ExponentialMovingAverageTest(tf.test.TestCase):
def test_decay_one_values_are_from_initialization(self):
ema = exponential_moving_average.ExponentialMovingAverage(decay=1.0)
initial_value = 2.0
variable = tf.Variable(initial_value)
ema.apply((variable,))
variable.assign(3.0)
ema.apply((variable,))
self.assertAllClose(ema.averaged_variables[0], initial_value)
def test_decay_zero_returns_last_value(self):
ema = exponential_moving_average.ExponentialMovingAverage(decay=0.0)
final_value = 3.0
variable = tf.Variable(2.0)
ema.apply((variable,))
variable.assign(final_value)
ema.apply((variable,))
self.assertAllClose(ema.averaged_variables[0], final_value)
def test_cross_replica_context_raises_error(self):
ema = exponential_moving_average.ExponentialMovingAverage(decay=0.0)
with self.assertRaisesRegex(
NotImplementedError, 'Cross-replica context version not implemented.'):
with tf.distribute.MirroredStrategy().scope():
variable = tf.Variable(2.0)
ema.apply((variable,))
def test_mirrored_strategy_replica_context_runs(self):
ema = exponential_moving_average.ExponentialMovingAverage(decay=0.5)
strategy = tf.distribute.MirroredStrategy()
def apply_to_ema(variable):
ema.apply((variable,))
with strategy.scope():
variable = tf.Variable(2.0)
strategy.run(apply_to_ema, (variable,))
self.assertAllClose(ema.averaged_variables[0], variable.read_value())
if __name__ == '__main__':
tf.test.main()
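# Hedged note (not part of the original test file): the cases above pin down the
# update rule averaged <- decay * averaged + (1 - decay) * value, with the first
# apply() seeding the average from the variable itself (decay=1.0 keeps the
# initial value, decay=0.0 tracks the latest value).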
|
cacreader/swig-4.0.2/Examples/contract/simple_cxx/runme3.py | kyletanyag/LL-Smartcard | 1,031 | 12712153 | import example
# Create the Circle object
r = 2;
print " Creating circle (radium: %d) :" % r
c = example.Circle(r)
# Set the location of the object
c.x = 20
c.y = 30
print " Here is its current position:"
print " Circle = (%f, %f)" % (c.x,c.y)
# ----- Call some methods -----
print "\n Here are some properties of the Circle:"
print " area = ", c.area()
print " perimeter = ", c.perimeter()
dx = 1;
dy = 1;
print " Moving with (%d, %d)..." % (dx, dy)
c.move(dx, dy)
del c
print "==================================="
# test move function */
r = 2;
print " Creating circle (radium: %d) :" % r
c = example.Circle(r)
# Set the location of the object
c.x = 20
c.y = 30
print " Here is its current position:"
print " Circle = (%f, %f)" % (c.x,c.y)
# ----- Call some methods -----
print "\n Here are some properties of the Circle:"
print " area = ", c.area()
print " perimeter = ", c.perimeter()
# no error for Circle's pre-assertion
dx = 1;
dy = -1;
print " Moving with (%d, %d)..." % (dx, dy)
c.move(dx, dy)
# error with Shape's pre-assertion
dx = -1;
dy = 1;
print " Moving with (%d, %d)..." % (dx, dy)
c.move(dx, dy)
|
nodemcu/nodemcu-uploader/nodemcu-uploader.py | kvderevyanko/price | 324 | 12712156 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2019 <NAME> <<EMAIL>>
# pylint: disable=C0103
"""makes it easier to run nodemcu-uploader from command line"""
from nodemcu_uploader import main
if __name__ == '__main__':
main.main_func()
|
vision3d/detector/proposal.py | jhultman/PV-RCNN | 131 | 12712172 | <reponame>jhultman/PV-RCNN
import torch
import math
from torch import nn
import torch.nn.functional as F
from vision3d.ops import sigmoid_focal_loss, batched_nms_rotated
from vision3d.core.box_encode import decode
class ProposalLayer(nn.Module):
"""
Use BEV feature map to generate 3D box proposals.
TODO: Fix long variable names, ugly line wraps.
"""
def __init__(self, cfg):
super(ProposalLayer, self).__init__()
self.cfg = cfg
self.conv_cls = nn.Conv2d(
cfg.PROPOSAL.C_IN, cfg.NUM_CLASSES * cfg.NUM_YAW, 1)
self.conv_reg = nn.Conv2d(
cfg.PROPOSAL.C_IN, cfg.NUM_CLASSES * cfg.NUM_YAW * cfg.BOX_DOF, 1)
self.TOPK, self.DOF = cfg.PROPOSAL.TOPK, cfg.BOX_DOF
self._init_weights()
def _init_weights(self):
nn.init.constant_(self.conv_cls.bias, (-math.log(1 - .01) / .01))
nn.init.constant_(self.conv_reg.bias, 0)
for m in (self.conv_cls.weight, self.conv_reg.weight):
nn.init.normal_(m, std=0.01)
def _generate_group_idx(self, B, n_cls):
"""Compute unique group_idx based on (batch_idx, class_idx) tuples."""
batch_idx = torch.arange(B)[:, None].expand(-1, n_cls)
class_idx = torch.arange(n_cls)[None, :].expand(B, -1)
group_idx = class_idx + n_cls * batch_idx
b, c, g = [x[..., None].expand(-1, -1, self.TOPK).reshape(-1)
for x in (batch_idx, class_idx, group_idx)]
return b, c, g
def _above_score_thresh(self, scores, class_idx):
"""Classes may have different score thresholds."""
thresh = scores.new_tensor([a['score_thresh'] for a in self.cfg.ANCHORS])
mask = scores > thresh[class_idx]
return mask
def _multiclass_batch_nms(self, boxes, scores):
"""Only boxes with same group_idx are jointly considered in nms"""
B, n_cls = scores.shape[:2]
scores = scores.view(-1)
boxes = boxes.view(-1, self.DOF)
bev_boxes = boxes[:, [0, 1, 3, 4, 6]]
batch_idx, class_idx, group_idx = self._generate_group_idx(B, n_cls)
idx = batched_nms_rotated(bev_boxes, scores, group_idx, iou_threshold=0.01)
boxes, batch_idx, class_idx, scores = \
[x[idx] for x in (boxes, batch_idx, class_idx, scores)]
mask = self._above_score_thresh(scores, class_idx)
out = [x[mask] for x in (boxes, batch_idx, class_idx, scores)]
return out
def _decode(self, reg_map, anchors, anchor_idx):
"""Expands anchors in batch dimension and calls decode."""
B, n_cls = reg_map.shape[:2]
anchor_idx = anchor_idx[..., None].expand(-1, -1, -1, self.DOF)
deltas = reg_map.reshape(B, n_cls, -1, self.cfg.BOX_DOF) \
.gather(2, anchor_idx)
anchors = anchors.view(1, n_cls, -1, self.cfg.BOX_DOF) \
.expand(B, -1, -1, -1).gather(2, anchor_idx)
boxes = decode(deltas, anchors)
return boxes
def inference(self, feature_map, anchors):
""":return (boxes, batch_idx, class_idx, scores)"""
cls_map, reg_map = self(feature_map)
score_map = cls_map.sigmoid_()
B, n_cls = score_map.shape[:2]
scores, anchor_idx = score_map.view(B, n_cls, -1).topk(self.TOPK, -1)
boxes = self._decode(reg_map, anchors, anchor_idx)
out = self._multiclass_batch_nms(boxes, scores)
return out
def reshape_cls(self, cls_map):
B, _, ny, nx = cls_map.shape
shape = (B, self.cfg.NUM_CLASSES, self.cfg.NUM_YAW, ny, nx)
cls_map = cls_map.view(shape)
return cls_map
def reshape_reg(self, reg_map):
B, _, ny, nx = reg_map.shape
shape = (B, self.cfg.NUM_CLASSES, self.cfg.BOX_DOF, -1, ny, nx)
reg_map = reg_map.view(shape).permute(0, 1, 3, 4, 5, 2)
return reg_map
def forward(self, feature_map):
cls_map = self.reshape_cls(self.conv_cls(feature_map))
reg_map = self.reshape_reg(self.conv_reg(feature_map))
return cls_map, reg_map
class ProposalLoss(nn.Module):
"""
Notation: (P_i, G_i, M_i) ~ (predicted, ground truth, mask).
Loss is averaged by number of positive examples.
TODO: Replace with compiled cuda focal loss.
"""
def __init__(self, cfg):
super(ProposalLoss, self).__init__()
self.cfg = cfg
def masked_sum(self, loss, mask):
"""Mask assumed to be binary."""
mask = mask.type_as(loss)
loss = (loss * mask).sum()
return loss
def reg_loss(self, P_reg, G_reg, M_reg):
"""Loss applied at all positive sites."""
P_xyz, P_wlh, P_yaw = P_reg.split([3, 3, 1], dim=-1)
G_xyz, G_wlh, G_yaw = G_reg.split([3, 3, 1], dim=-1)
loss_xyz = F.smooth_l1_loss(P_xyz, G_xyz, reduction='none')
loss_wlh = F.smooth_l1_loss(P_wlh, G_wlh, reduction='none')
loss_yaw = F.smooth_l1_loss(P_yaw, G_yaw, reduction='none') / math.pi
loss = self.masked_sum(loss_xyz + loss_wlh + loss_yaw, M_reg)
return loss
def cls_loss(self, P_cls, G_cls, M_cls):
"""Loss is applied at all non-ignore sites. Assumes logit scores."""
loss = sigmoid_focal_loss(P_cls, G_cls.float(), reduction='none')
loss = self.masked_sum(loss, M_cls)
return loss
def forward(self, item):
keys = ['G_cls', 'M_cls', 'P_cls', 'G_reg', 'M_reg', 'P_reg']
G_cls, M_cls, P_cls, G_reg, M_reg, P_reg = map(item.get, keys)
normalizer = M_reg.type_as(P_reg).sum().clamp_(min=1)
cls_loss = self.cls_loss(P_cls, G_cls, M_cls) / normalizer
reg_loss = self.reg_loss(P_reg, G_reg, M_reg) / normalizer
loss = cls_loss + self.cfg.TRAIN.LAMBDA * reg_loss
losses = dict(cls_loss=cls_loss, reg_loss=reg_loss, loss=loss)
return losses
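# Hedged shape note (not part of the original file): for a BEV feature map of
# shape (B, C_IN, ny, nx), ProposalLayer.forward returns
#   cls_map: (B, NUM_CLASSES, NUM_YAW, ny, nx)
#   reg_map: (B, NUM_CLASSES, NUM_YAW, ny, nx, BOX_DOF)
# and ProposalLayer.inference returns (boxes, batch_idx, class_idx, scores) after
# top-k selection, decoding against the anchors and rotated NMS.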
|
fiftyone/types/__init__.py | FLIR/fiftyone | 1,130 | 12712192 | <reponame>FLIR/fiftyone<filename>fiftyone/types/__init__.py
"""
FiftyOne types.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
# pylint: disable=wildcard-import,unused-wildcard-import
from .dataset_types import *
|
plenum/test/audit_ledger/test_first_audit_catchup_during_ordering.py | andkononykhin/plenum | 148 | 12712252 | import pytest
from plenum.test import waits
from plenum.common.constants import LEDGER_STATUS, DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import MessageReq, CatchupReq
from plenum.server.catchup.node_leecher_service import NodeLeecherService
from plenum.test.delayers import ppDelay, pDelay, cDelay, DEFAULT_DELAY
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_request.test_timestamp.helper import get_timestamp_suspicion_count
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules, start_delaying, stop_delaying_and_process
from stp_core.loop.eventually import eventually
def delay_domain_ledger_catchup():
def delay(msg):
msg = msg[0]
if isinstance(msg, MessageReq) and \
msg.msg_type == LEDGER_STATUS and \
msg.params.get('ledgerId') == DOMAIN_LEDGER_ID:
return DEFAULT_DELAY
if isinstance(msg, CatchupReq) and \
msg.ledgerId == DOMAIN_LEDGER_ID:
return DEFAULT_DELAY
return delay
def test_first_audit_catchup_during_ordering(tdir, tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
lagging_node = txnPoolNodeSet[-1]
other_nodes = txnPoolNodeSet[:-1]
other_stashers = [node.nodeIbStasher for node in other_nodes]
def lagging_node_state() -> NodeLeecherService.State:
return lagging_node.ledgerManager._node_leecher._state
def check_lagging_node_is_not_syncing_audit():
assert lagging_node_state() != NodeLeecherService.State.SyncingAudit
# Prevent lagging node from catching up domain ledger (and finishing catchup)
with delay_rules(other_stashers, delay_domain_ledger_catchup()):
# Start catchup on lagging node
lagging_node.start_catchup()
assert lagging_node_state() == NodeLeecherService.State.SyncingAudit
# Ensure that audit ledger is caught up by lagging node
looper.run(eventually(check_lagging_node_is_not_syncing_audit))
assert lagging_node_state() != NodeLeecherService.State.Idle
    # Order requests on all nodes except the lagging one, where they go to the stashed state
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
# Now catchup should end and lagging node starts processing stashed PPs
# and resumes ordering
# ensure that all nodes will have same data after that
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
# ensure that no suspicions about obsolete PP have been raised
assert get_timestamp_suspicion_count(lagging_node) == 0
|
cherry/envs/action_space_scaler_wrapper.py | acse-yl27218/cherry | 160 | 12712291 | <gh_stars>100-1000
#!/usr/bin/env python3
import gym
import numpy as np
from .base import Wrapper
class ActionSpaceScaler(Wrapper):
"""
Scales the action space to be in the range (-clip, clip).
Adapted from Vitchyr Pong's RLkit:
https://github.com/vitchyr/rlkit/blob/master/rlkit/envs/wrappers.py#L41
"""
def __init__(self, env, clip=1.0):
super(ActionSpaceScaler, self).__init__(env)
self.env = env
self.clip = clip
ub = np.ones(self.env.action_space.shape) * clip
self.action_space = gym.spaces.Box(-1 * ub, ub, dtype=np.float32)
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def _normalize(self, action):
lb = self.env.action_space.low
ub = self.env.action_space.high
scaled_action = lb + (action + self.clip) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
return scaled_action
def step(self, action):
if self.is_vectorized:
action = [self._normalize(a) for a in action]
else:
action = self._normalize(action)
return self.env.step(action)
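# Hedged usage sketch (not part of the original module); the environment id below
# is an assumption -- any Gym env with a Box action space would do.
if __name__ == '__main__':
    env = ActionSpaceScaler(gym.make('Pendulum-v0'), clip=1.0)  # assumed env id
    env.reset()
    # actions are expected in (-clip, clip) and rescaled to the wrapped env's bounds
    env.step(env.action_space.sample())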
|
egs/ifnenit/v1/local/transcript_to_latin.py | shuipi100/kaldi | 805 | 12712326 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This script is originally from qatip project (http://qatsdemo.cloudapp.net/qatip/demo/)
# of Qatar Computing Research Institute (http://qcri.qa/)
# Convert every utterance transcript to position dependent latin format using "data/train/words2latin" as dictionary.
import os, sys, re, io
with open(sys.argv[1], encoding="utf-8") as f:
d = dict(x.rstrip().split(None, 1) for x in f)
in_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
for line in in_stream:
mappedWords = []
for word in line.split():
mappedWords.append(d[word])
sys.stdout.write(re.sub(" +", " ", " ~A ".join(mappedWords).strip()) + "\n")
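# Hedged usage sketch (not part of the original script); the surrounding Kaldi
# pipeline is an assumption:
#   cut -d' ' -f2- data/train/text | local/transcript_to_latin.py data/train/words2latin
# Each whitespace-delimited word on stdin is replaced by its mapping from the
# dictionary file, and the mapped words are joined with the " ~A " separator.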
|
Beginers/ExampleExceptions/C_finally_.py | arunkgupta/PythonTrainingExercises | 150 | 12712363 | <gh_stars>100-1000
#!/usr/bin/env python
"""Example of raising an exception where b() has a finally clause and a()
catches the exception.
Created on Aug 19, 2011
@author: paulross
"""
class ExceptionNormal(Exception):
pass
class ExceptionCleanUp(Exception):
pass
def a():
try:
b()
except ExceptionNormal as err:
print(' a(): CAUGHT "%s"' % err)
def b():
try:
c()
finally:
print(' b(): finally: This code is always executed.')
def c():
print('Raising "ExceptionNormal" from c()')
raise ExceptionNormal('ExceptionNormal raised from function c()')
def main():
a()
return 0
if __name__ == '__main__':
main()
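# Expected output when run directly (derived from the control flow above, modulo
# exact whitespace):
#   Raising "ExceptionNormal" from c()
#    b(): finally: This code is always executed.
#    a(): CAUGHT "ExceptionNormal raised from function c()"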
|
console/scan_retention.py | RishiKumarRay/scantron | 684 | 12712370 | <reponame>RishiKumarRay/scantron<gh_stars>100-1000
#!/usr/bin/env python
# Standard Python libraries.
import argparse
import datetime
import glob
import logging
import os
import sys
# Third party Python libraries.
import django
# Custom Python libraries.
import django_connector
# Setup logging.
ROOT_LOGGER = logging.getLogger()
LOG_FORMATTER = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
def delete_files_in_dir(folder):
"""Delete all the files in a directory."""
logging.info("Deleting files in folder: {}".format(folder))
file_list = os.listdir(folder)
for f in file_list:
os.remove(os.path.join(folder, f))
def main(
database_remove, file_remove, scan_retention_in_minutes, max_queryset_size_to_delete, disable_dryrun, verbosity,
):
"""Execute main function."""
# Assign log level.
ROOT_LOGGER.setLevel((6 - verbosity) * 10)
# Setup file logging.
script_name = os.path.basename(os.path.abspath(__file__))
log_file_handler = logging.FileHandler(f"{script_name.split('.py')[0]}.log")
log_file_handler.setFormatter(LOG_FORMATTER)
ROOT_LOGGER.addHandler(log_file_handler)
# Setup console logging.
console_handler = logging.StreamHandler()
console_handler.setFormatter(LOG_FORMATTER)
ROOT_LOGGER.addHandler(console_handler)
ROOT_LOGGER.info(f"Starting {script_name} script.")
if not django_connector.Configuration.objects.filter(id=1)[0].enable_scan_retention:
ROOT_LOGGER.info("Scan retention is disabled. Exiting...")
return
ROOT_LOGGER.info(f"Disable dryrun setting: {disable_dryrun}")
# Utilize Django's timezone aware setting to return a datetime object.
now = django.utils.timezone.now()
# Retrieve scan retention value from Configuration if it is not specified.
# 60 * 24 = 1440 minutes in a day.
if not scan_retention_in_minutes:
scan_retention_in_minutes = (60 * 24) * (django_connector.Configuration.objects.all()[0].scan_retention_in_days)
ROOT_LOGGER.info(f"Removing scans older than {scan_retention_in_minutes} minutes.")
# Calculate the datetime "scan_retention_in_minutes" ago in the past.
datetime_retention_in_minutes = now - datetime.timedelta(minutes=scan_retention_in_minutes)
# Initialize scan_retention_dict as empty dictionary.
scan_retention_dict = {}
# Filter for scans that meet the retention criteria.
scans_older_than_retention_date = django_connector.ScheduledScan.objects.filter(
scan_status__in=["cancelled", "completed", "error"]
).filter(completed_time__lt=datetime_retention_in_minutes)
# Remove the files first, since they depend on the scans "result_file_base_name" attribute existing.
if file_remove:
# Build directory paths.
root_dir = "/home/scantron/console"
target_files_dir = os.path.join(root_dir, "target_files")
complete_dir = os.path.join(root_dir, "scan_results", "complete")
processed_dir = os.path.join(root_dir, "scan_results", "processed")
cancelled_dir = os.path.join(root_dir, "scan_results", "cancelled")
bigdata_analytics_dir = os.path.join(root_dir, "for_bigdata_analytics")
# Loop through each scan.
for scan in scans_older_than_retention_date:
result_file_base_name = scan.result_file_base_name
# Grab a list of files from the "target_files" directory. Will capture any .excluded_targets as well.
target_files = glob.glob(os.path.join(target_files_dir, f"{result_file_base_name}.*targets"))
# Grab a list of files from the "complete" directory.
complete_dir_scans = glob.glob(os.path.join(complete_dir, f"{result_file_base_name}*"))
# Grab a list of files from the "processed" directory.
processed_dir_scans = glob.glob(os.path.join(processed_dir, f"{result_file_base_name}*"))
# Grab a list of files from the "cancelled" directory.
cancelled_dir_scans = glob.glob(os.path.join(cancelled_dir, f"{result_file_base_name}*"))
# Grab a list of .csv files from the "for_bigdata_analytics" directory.
bigdata_analytics_dir_csv_files = glob.glob(
os.path.join(bigdata_analytics_dir, f"{result_file_base_name}.csv")
)
for file_to_delete in (
target_files
+ complete_dir_scans
+ processed_dir_scans
+ cancelled_dir_scans
+ bigdata_analytics_dir_csv_files
):
ROOT_LOGGER.debug(f"Deleting file: {file_to_delete}")
if disable_dryrun:
try:
os.remove(file_to_delete)
ROOT_LOGGER.debug(f"Deleted file: {file_to_delete}")
except OSError:
ROOT_LOGGER.error(f"Could not delete file: {file_to_delete}")
if database_remove:
# Determine the total number of scans to delete.
scans_older_than_retention_date_size = scans_older_than_retention_date.count()
ROOT_LOGGER.info(f"{scans_older_than_retention_date_size} scans will be removed from the database.")
if disable_dryrun:
if scans_older_than_retention_date_size < (max_queryset_size_to_delete + 1):
scan_retention_dict["database"] = ()
try:
database_result = scans_older_than_retention_date.delete()
scan_retention_dict["database"] = database_result
ROOT_LOGGER.info(
f"Successfully deleted {scans_older_than_retention_date_size} scans from the database."
)
except Exception as e:
ROOT_LOGGER.exception(f"Problem deleting scans from database using .delete(). Exception: {e}")
else:
ROOT_LOGGER.warning(
f"The number of scans to delete ({scans_older_than_retention_date_size}) is greater than the "
f"specified max_queryset_size_to_delete ({max_queryset_size_to_delete}). Using an iterator for "
"better memory management."
)
# Utilize an iterator for better memory management.
# https://medium.com/@hansonkd/performance-problems-in-the-django-orm-1f62b3d04785
total_iterator_scans_deleted = 0
for scan in scans_older_than_retention_date.iterator():
try:
# Capture scan ID.
scan_id = scan.id
scan.delete()
ROOT_LOGGER.debug(f"Scan ID successfully deleted: {scan_id}")
total_iterator_scans_deleted += 1
except Exception as e:
ROOT_LOGGER.exception(f"Problem deleting scan from database using iterator(). Exception: {e}")
ROOT_LOGGER.info(f"Successfully deleted {total_iterator_scans_deleted} scans from the database.")
ROOT_LOGGER.info(f"scan_retention_dict: {scan_retention_dict}")
ROOT_LOGGER.info(f"{script_name} is done!")
return scan_retention_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Remove scan data, targets, and results older than a specified date.")
parser.add_argument(
"-b",
dest="database_remove",
action="store_true",
required=False,
default=False,
help="Remove scan database entries.",
)
parser.add_argument(
"-c",
dest="file_remove",
action="store_true",
required=False,
default=False,
help=(
"Remove target_files/*.targets, target_files/*.excluded_targets, scan_results/*, and "
"for_bigdata_analytics/*.csv files"
),
)
parser.add_argument(
"-o",
dest="scan_retention_in_minutes",
action="store",
required=False,
type=int,
help="Delete emails older than X minutes. WARNING: Overrides the configuration setting.",
)
parser.add_argument(
"-m",
dest="max_queryset_size_to_delete",
action="store",
required=False,
type=int,
default=500,
help=(
"Max number of records to try and delete through Django's ORM .delete() function, otherwise a memory "
"efficient iterator must be used."
),
)
parser.add_argument(
"-r", dest="disable_dryrun", action="store_true", required=False, default=False, help="Disable dryrun option."
)
parser.add_argument(
"-v",
dest="verbosity",
action="store",
type=int,
default=4,
help="Verbosity level (0=NOTSET, 1=CRITICAL, 2=ERROR, 3=WARNING, 4=INFO, 5=DEBUG,). Default: 4",
)
args = parser.parse_args()
if (args.scan_retention_in_minutes is not None) and (args.scan_retention_in_minutes <= 0):
print("Scan retention in days must be greater than 0...exiting.")
sys.exit(0)
main(**vars(args))
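# Hedged CLI sketch (flags taken from the argparse definitions above):
#   python scan_retention.py -b -c           # dryrun: only log what would be removed
#   python scan_retention.py -b -c -r -v 5   # actually delete DB rows and files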
|
tests/components/media_player/__init__.py | MrDelik/core | 30,023 | 12712390 | <filename>tests/components/media_player/__init__.py
"""The tests for Media player platforms."""
|
h2o-py/tests/testdir_algos/gbm/pyunit_ecology_gbm.py | ahmedengu/h2o-3 | 6,098 | 12712433 | <filename>h2o-py/tests/testdir_algos/gbm/pyunit_ecology_gbm.py
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import pandas
from sklearn import ensemble
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def ecologyGBM():
ecology_train = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
ntrees = 100
max_depth = 5
min_rows = 10
learn_rate = 0.1
# Prepare data for scikit use
trainData = pandas.read_csv(pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
trainData.dropna(inplace=True)
le = preprocessing.LabelEncoder()
le.fit(trainData['Method'])
trainData['Method'] = le.transform(trainData['Method'])
trainDataResponse = trainData["Angaus"]
trainDataFeatures = trainData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
ecology_train["Angaus"] = ecology_train["Angaus"].asfactor()
# Train H2O GBM Model:
gbm_h2o = H2OGradientBoostingEstimator(ntrees=ntrees,
learn_rate=learn_rate,
distribution="bernoulli",
min_rows=min_rows,
max_depth=max_depth,
categorical_encoding='label_encoder')
gbm_h2o.train(x=list(range(2,ecology_train.ncol)), y="Angaus", training_frame=ecology_train)
# Train scikit GBM Model:
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(trainDataFeatures,trainDataResponse)
# Evaluate the trained models on test data
# Load the test data (h2o)
ecology_test = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"))
# Load the test data (scikit)
testData = pandas.read_csv(pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"))
testData.dropna(inplace=True)
testData['Method'] = le.transform(testData['Method'])
testDataResponse = testData["Angaus"]
testDataFeatures = testData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
"USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
# Score on the test data and compare results
# scikit
auc_sci = roc_auc_score(testDataResponse, gbm_sci.predict_proba(testDataFeatures)[:,1])
# h2o
gbm_perf = gbm_h2o.model_performance(ecology_test)
auc_h2o = gbm_perf.auc()
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
pyunit_utils.standalone_test(ecologyGBM)
else:
ecologyGBM()
|
libs/models.py | mehrdad-shokri/lightbulb-framework | 497 | 12712453 | <gh_stars>100-1000
"""
This file contains all the in-memory Database implementation
of the Burp Extension. The contained models are used for
maintaining the Burp Proxy requests and responses, the lightbulb's
filters (regexes, grammars), the lightbulb's trees, the user's
campaigns, and other info. The models also include all the
necessary functionality for representing the data to the end user
through Jython swing framework.
"""
from java.util import ArrayList
from java.lang import Boolean
from javax.swing import JTabbedPane
from javax.swing import JTable
from javax.swing import JCheckBox
from javax.swing import JLabel
from javax.swing.table import AbstractTableModel
from javax.swing.table import TableCellRenderer
from threading import Lock
from threading import Thread
import sys
class BurpDatabaseModels():
"""
The in-memory Database implementation
of the Burp Extension.
"""
def __init__(self):
"""
The constructor of BurpDatabaseModels object defines a number of
class variables tracking the number of deleted records, and maintaining
the references to the arrays of records.
Args:
None
Returns:
None
"""
self.STATIC_MESSAGE_TABLE_COLUMN_COUNT = 6
self.lock = Lock()
self.arrayOfMessages = ArrayList()
self.arrayOfCampaigns = ArrayList()
self.arrayOfSettings = ArrayList()
self.deletedCampaignCount = 0
self.deletedRoleCount = 0
self.deletedMessageCount = 0
self.deletedSettingCount = 0
self.selfExtender = None
def addCampaign(self, name):
"""
Adds a new record in the Campaigns array,
and returns the index of the campaign.
Args:
name (str): The name of the campaign
Returns:
int: The index of the inserted campaign
"""
campaign_index = -1
try:
self.lock.acquire()
campaign_index = self.arrayOfCampaigns.size()
self.arrayOfCampaigns.append(
CampaignEntry(
campaign_index,
campaign_index - self.deletedCampaignCount,
name))
finally:
self.lock.release()
return campaign_index
def updateCampaign(self, campaign_index, id, val):
"""
Adds a new record in the Campaigns array,
and returns the index of the campaign.
Args:
name (str): The name of the campaign
Returns:
int: The index of the inserted campaign
"""
try:
self.lock.acquire()
campaign_entry = self.arrayOfCampaigns[campaign_index]
if campaign_entry:
if id == 1:
campaign_entry._Membership = val
if id == 2:
campaign_entry._MembershipB = val
except:
            print 'Error when inserting campaigns. Wanted to insert campaign with id ',campaign_index
size = self.arrayOfCampaigns.size()
print 'Current campaigns:',size
for i in self.arrayOfCampaigns:
print 'Campaign with id ',i._index
print sys.exc_info()
finally:
self.lock.release()
def delete_campaign(self, campaignIndex):
"""
Delete the selected row of the campaigns
Args:
            campaignIndex (int): The index of the campaign to terminate
Returns:
None
"""
try:
self.lock.acquire()
print 'Terminating Campaign'
campaign_entry = self.arrayOfCampaigns[campaignIndex]
if campaign_entry:
campaign_entry._deleted = True
campaign_entry._Result = "Terminated"
self.deletedCampaignCount += 1
if len(self.arrayOfCampaigns) > campaignIndex + 1:
for i in self.arrayOfCampaigns[campaignIndex + 1:]:
i._tableRow -= 1
print 'Campaign was Terminated'
finally:
self.lock.release()
def addSetting(self, name, value, domain=None,
description=None, path=None):
"""
Adds a new record in the settings array,
and returns the index of the setting.
Args:
name (str): The name of the setting
value (str): The value of the setting
domain (str): The category of the setting (optional)
description (str): A small description of the setting (optional)
path (str): The location of the described resource (optional)
Returns:
int: The index of the inserted setting
"""
self.lock.acquire()
settingIndex = -1
for i in self.getActiveSettingIndexes(domain):
if self.arrayOfSettings[i]._name == name:
settingIndex = i
if settingIndex < 0:
settingIndex = self.arrayOfSettings.size()
self.arrayOfSettings.append(
SettingEntry(
settingIndex, len(
self.getActiveSettingIndexes(domain)), name, value, domain, description, path))
self.lock.release()
return settingIndex
def createNewMessage(
self,
messagebuffer,
host,
method,
path,
selectedparameter,
totest=False,
regex="HTTP/1.1 200 OK",
failRegex="(HTTP/1.1 403|block|impact)"):
"""
Adds a new record in the messages array,
and returns the index of the message.
Args:
messagebuffer (str): The saved buffer of the burp request-response
            host (str): The targeted host
            method (str): The used HTTP method
            path (str): The HTTP URL path
            selectedparameter (str): The parameter of the HTTP request
            totest (bool): A boolean value indicating whether to test
                the response against the regex or not
regex (str): A regex that if matches the response indicates a success
failRegex (str): A regex that if matches the response indicates a failure
Returns:
int: The index of the inserted message
"""
self.lock.acquire()
messageIndex = self.arrayOfMessages.size()
self.arrayOfMessages.add(
MessageEntry(
messageIndex,
messageIndex -
self.deletedMessageCount,
messagebuffer,
host,
method,
path,
selectedparameter,
regex,
failRegex))
self.lock.release()
if totest:
t = Thread(
target=self.selfExtender.runMessagesThread,
args=[messageIndex])
t.start()
return messageIndex
def clear(self):
"""
Clears all arrays and all counters
Args:
None
Returns:
None
"""
self.lock.acquire()
self.arrayOfMessages = ArrayList()
self.arrayOfCampaigns = ArrayList()
self.deletedCampaignCount = 0
self.deletedRoleCount = 0
self.deletedMessageCount = 0
self.lock.release()
def getActiveCampaignIndexes(self):
"""
Gets all campaigns that are not deleted
Args:
None
Returns:
array: The active campaigns
"""
return [x._index for x in self.arrayOfCampaigns if not x.isDeleted()]
def getActiveMessageIndexes(self):
"""
Gets all messages that are not deleted
Args:
None
Returns:
array: The active messages
"""
return [x._index for x in self.arrayOfMessages if not x.isDeleted()]
def getActiveSettingIndexes(self, domain=None):
"""
Gets all settings that are not deleted and belong to the input category
Args:
domain (str): The category of the requested settings (optional)
Returns:
array: The returned settings
"""
return [
x._index for x in self.arrayOfSettings if not x.isDeleted() and (
not domain or x._domain == domain)]
def getMessageByRow(self, row):
"""
Gets a selected row of the messages, as long as it is not deleted.
Args:
            row (str): The row of the table where the message appears
Returns:
array: The returned row with the message details
"""
for m in self.arrayOfMessages:
if not m.isDeleted() and m.getTableRow() == row:
return m
def getSettingByRow(self, row, domain=None):
"""
        Gets a selected row of the settings, as long as it is not deleted.
        Args:
            row (str): The row of the table where the setting appears
Returns:
array: The returned row with the setting details
"""
for m in [
x for x in self.arrayOfSettings if (
not domain or x._domain == domain)]:
if not m.isDeleted() and m.getTableRow() == row and (
not domain or m._domain == domain):
return m
def getCampaignByRow(self, row):
"""
        Gets a selected row of the campaigns, as long as it is not deleted.
        Args:
            row (str): The row of the table where the campaign appears
Returns:
array: The returned row with the campaign details
"""
for u in self.arrayOfCampaigns:
if not u.isDeleted() and u.getTableRow() == row:
return u
def delete_message(self, messageIndex):
"""
Delete the selected row of the messages
Args:
            messageIndex (int): The index of the message to delete
Returns:
None
"""
self.lock.acquire()
messageEntry = self.arrayOfMessages[messageIndex]
if messageEntry:
messageEntry._deleted = True
self.deletedMessageCount += 1
if len(self.arrayOfMessages) > messageIndex + 1:
for i in self.arrayOfMessages[messageIndex + 1:]:
i._tableRow -= 1
self.lock.release()
"""
Swing Table Modles
"""
class CampaignTableModel(AbstractTableModel):
"""
The table model for the campaings, with the
getters and the setters.
"""
def __init__(self, db):
"""
The constructor of the model
Args:
db (object): The reference to the database instance.
Returns:
None
"""
self._db = db
def getRowCount(self):
"""
Returns the total number of table records that are not deleted.
Args:
None
Returns:
int: The total number of table records that are not deleted.
"""
try:
return len(self._db.getActiveCampaignIndexes())
except:
print 'error in campaign table model'
return 0
def getColumnCount(self):
"""
Returns the total number of table columns.
Args:
None
Returns:
int: The total number of table columns.
"""
return 4
def getColumnName(self, columnIndex):
"""
Returns the name of a selected column
Args:
columnIndex (int): The index of the column.
Returns:
str: The name of the column.
"""
if columnIndex == 0:
return "Campaigns"
elif columnIndex == 1:
return "Queries A"
elif columnIndex == 2:
return "Queries B"
elif columnIndex == 3:
return "Results"
return ""
def getValueAt(self, rowIndex, columnIndex):
"""
Returns the value of a selected row and a selected column
Args:
rowIndex (int): The index of the row.
columnIndex (int): The index of the column.
Returns:
str: The value of the record
"""
CampaignEntry = self._db.getCampaignByRow(rowIndex)
if CampaignEntry:
if columnIndex == 0:
return str(CampaignEntry._name)
elif columnIndex == 1:
return CampaignEntry._Membership
elif columnIndex == 2:
return CampaignEntry._MembershipB
elif columnIndex == 3:
return CampaignEntry._Result
return ""
def addRow(self, row):
"""
Notifies all listeners that the row was inserted.
Args:
row (int): The index of the inserted row
Returns:
None
"""
self.fireTableRowsInserted(row, row)
def setValueAt(self, val, row, col):
"""
Sets the selected value in a selected row and a selected column record
and notifies the listeners for the change.
Args:
val (depends on the class type of the record): The selected value
row (int): The index of the row.
col (int): The index of the column.
Returns:
None
"""
CampaignEntry = self._db.getCampaignByRow(row)
if CampaignEntry:
if col == 0:
CampaignEntry._name = val
elif col == 1:
CampaignEntry._Membership = val
elif col == 2:
CampaignEntry._MembershipB = val
elif col == 3:
CampaignEntry._Result = val
self.fireTableCellUpdated(row, col)
def isCellEditable(self, row, col):
"""
Checks whether the value can be edited.
Args:
row (int): The index of the selected row
col (int): The index of the selected column
Returns:
bool: A boolean value indicating whether the value is editable
"""
return False
def getColumnClass(self, columnIndex):
"""
Returns the class type of the record of a selected column.
Args:
columnIndex (int): The index of the selected column
Returns:
str: The class type of the column's records
"""
if columnIndex == 1:
return int
if columnIndex == 2:
return int
return str
class CampaignTable(JTable):
"""
The table for the campaigns, with functions
for its constructor and redrawing.
"""
def __init__(self, extender, model):
"""
The constructor of the table initiates the
extender and model class variables.
Args:
extender (burp extension): A self reference to the extension
model (abstract table class): The CampaignTableModel class
"""
self._extender = extender
self.setModel(model)
return
def redrawTable(self):
"""
This function configures the columns width.
"""
try:
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
self.getColumnModel().getColumn(0).setMinWidth(220)
self.getColumnModel().getColumn(0).setMaxWidth(220)
self.getColumnModel().getColumn(1).setMinWidth(125)
self.getColumnModel().getColumn(1).setMaxWidth(125)
self.getColumnModel().getColumn(2).setMinWidth(125)
self.getColumnModel().getColumn(2).setMaxWidth(125)
self.getColumnModel().getColumn(3).setMinWidth(150)
self.getColumnModel().getColumn(3).setMaxWidth(150)
self.getTableHeader().getDefaultRenderer().setHorizontalAlignment(JLabel.CENTER)
except:
pass
class LibraryTableModel(AbstractTableModel):
"""
The table model for the library, with the
getters and the setters.
"""
def __init__(self, db, domain, category=None):
"""
The constructor of the model
Args:
db (object): The reference to the database instance.
domain (str): The category of the related data.
category (str) The subcategory of the related data
Returns:
None
"""
self._db = db
self._domain = domain
self._category = category
def getRowCount(self):
"""
Returns the total number of table records that are not deleted.
Args:
None
Returns:
int: The total number of table records that are not deleted.
"""
try:
return len(self._db.getActiveSettingIndexes(self._domain))
except:
print 'error in LibraryTableModel'
return 0
def getColumnCount(self):
"""
Returns the total number of table columns.
Args:
None
Returns:
int: The total number of table columns.
"""
return 3
def getColumnName(self, columnIndex):
"""
Returns the name of a selected column
Args:
columnIndex (int): The index of the column.
Returns:
str: The name of the column.
"""
if columnIndex == 0:
return "Name"
elif columnIndex == 1:
return "description"
elif columnIndex == 2:
return "Value"
else:
return ""
return ""
def getValueAt(self, rowIndex, columnIndex):
"""
Returns the value of a selected row and a selected column
Args:
rowIndex (int): The index of the row.
columnIndex (int): The index of the column.
Returns:
str: The value of the record
"""
messageEntry = self._db.getSettingByRow(rowIndex, self._domain)
if messageEntry:
if columnIndex == 0:
return messageEntry._name
elif columnIndex == 1:
return messageEntry._description
elif columnIndex == 2:
if self._category == 1:
return messageEntry._val1
elif self._category == 2:
return messageEntry._val2
elif self._category == 3:
return messageEntry._val3
elif self._category == 4:
return messageEntry._val4
elif self._category == 5:
return messageEntry._val5
elif self._category == 6:
return messageEntry._val6
elif self._category == 7:
return messageEntry._val7
elif self._category == 8:
return messageEntry._val8
else:
return messageEntry._value
else:
return ""
return ""
def addRow(self, row):
"""
Notifies all listeners that the row was inserted.
Args:
row (int): The index of the inserted row
Returns:
None
"""
self.fireTableRowsInserted(row, row)
def setValueAt(self, val, row, col):
"""
Sets the selected value in a selected row and a selected column record
and notifies the listeners for the change.
Args:
val (depends on the class type of the record): The selected value
row (int): The index of the row.
col (int): The index of the column.
Returns:
None
"""
messageEntry = self._db.getSettingByRow(row, self._domain)
if col == 0:
messageEntry._name = val
elif col == 1:
messageEntry._description = val
elif col == 2:
if self._category == 1:
messageEntry._val1 = val
elif self._category == 2:
messageEntry._val2 = val
elif self._category == 3:
messageEntry._val3 = val
elif self._category == 4:
messageEntry._val4 = val
elif self._category == 5:
messageEntry._val5 = val
elif self._category == 6:
messageEntry._val6 = val
elif self._category == 7:
messageEntry._val7 = val
elif self._category == 8:
messageEntry._val8 = val
else:
messageEntry._value = val
self.fireTableCellUpdated(row, col)
def isCellEditable(self, row, col):
"""
Checks whether the value can be edited.
Args:
row (int): The index of the selected row
col (int): The index of the selected column
Returns:
bool: A boolean value indicating whether the value is editable
"""
if col == 2:
return True
return False
def getColumnClass(self, columnIndex):
"""
Returns the class type of the record of a selected column.
Args:
columnIndex (int): The index of the selected column
Returns:
str: The class type of the column's records
"""
if columnIndex == 2:
return Boolean
return str
class LibraryTable(JTable):
"""
The table for the library, with functions
for its constructor and redrawing.
"""
def __init__(self, extender, model):
"""
The constructor of the table
Args:
extender (burp extension): A self reference to the extension
model (abstract table class): The LibraryTableModel class
Returns:
None
"""
self._extender = extender
self.setModel(model)
return
def redrawTable(self):
"""
This function configures the columns width.
"""
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
self.getColumnModel().getColumn(0).setMinWidth(300)
self.getColumnModel().getColumn(0).setMaxWidth(300)
self.getColumnModel().getColumn(1).setMinWidth(400)
self.getColumnModel().getColumn(1).setMaxWidth(400)
self.getColumnModel().getColumn(2).setMinWidth(100)
self.getColumnModel().getColumn(2).setMaxWidth(100)
self.getTableHeader().getDefaultRenderer().setHorizontalAlignment(JLabel.CENTER)
class SettingsTableModel(AbstractTableModel):
"""
The table model for the settings, with the
getters and the setters.
"""
def __init__(self, db, domain):
"""
The constructor of the table
Args:
db (object): The reference to the database instance.
domain (str): The category of the related data.
Returns:
None
"""
self._db = db
self._domain = domain
def getRowCount(self):
"""
Returns the total number of table records that are not deleted.
Args:
None
Returns:
int: The total number of table records that are not deleted.
"""
try:
return len(self._db.getActiveSettingIndexes(self._domain))
except:
return 0
def getColumnCount(self):
"""
Returns the total number of table columns.
Args:
None
Returns:
int: The total number of table columns.
"""
return 2
def getColumnName(self, columnIndex):
"""
Returns the name of a selected column
Args:
columnIndex (int): The index of the column.
Returns:
str: The name of the column.
"""
if columnIndex == 0:
return "Name"
elif columnIndex == 1:
return "Value"
else:
return ""
return ""
def getValueAt(self, rowIndex, columnIndex):
"""
Returns the value of a selected row and a selected column
Args:
rowIndex (int): The index of the row.
columnIndex (int): The index of the column.
Returns:
str: The value of the record
"""
messageEntry = self._db.getSettingByRow(rowIndex, self._domain)
if messageEntry:
if columnIndex == 0:
return messageEntry._name
elif columnIndex == 1:
return messageEntry._value
else:
return ""
return ""
def addRow(self, row):
"""
Notifies all listeners that the row was inserted.
Args:
row (int): The index of the inserted row
Returns:
None
"""
self.fireTableRowsInserted(row, row)
def setValueAt(self, val, row, col):
"""
Sets the selected value in a selected row and a selected column record
and notifies the listeners for the change.
Args:
val (depends on the class type of the record): The selected value
row (int): The index of the row.
col (int): The index of the column.
Returns:
None
"""
messageEntry = self._db.getSettingByRow(row, self._domain)
if col == 0:
messageEntry._name = val
elif col == 1:
messageEntry._value = val
self.fireTableCellUpdated(row, col)
def isCellEditable(self, row, col):
"""
Checks whether the value can be edited.
Args:
row (int): The index of the selected row
col (int): The index of the selected column
Returns:
bool: A boolean value indicating whether the value is editable
"""
if col == 1:
return True
return False
def getColumnClass(self, columnIndex):
"""
Returns the class type of the record of a selected column.
Args:
columnIndex (int): The index of the selected column
Returns:
str: The class type of the column's records
"""
return str
class SettingsTable(JTable):
"""
The table for the settings, with functions
for its constructor and redrawing.
"""
def __init__(self, extender, model):
"""
The constructor of the table
Args:
extender (burp extension): A self reference to the extension
model (abstract table class): The SettingsTableModel class
Returns:
None
"""
self._extender = extender
self.setModel(model)
return
def redrawTable(self):
"""
This function configures the columns width.
"""
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
self.getColumnModel().getColumn(0).setMinWidth(200)
self.getColumnModel().getColumn(0).setMaxWidth(200)
self.getColumnModel().getColumn(1).setMinWidth(200)
self.getColumnModel().getColumn(1).setMaxWidth(200)
self.getTableHeader().getDefaultRenderer().setHorizontalAlignment(JLabel.CENTER)
class MessageTableModel(AbstractTableModel):
"""
The table model for the messages, with the
getters and the setters.
"""
def __init__(self, db):
"""
The constructor of the table
Args:
db (object): The reference to the database instance.
Returns:
None
"""
self._db = db
def getRowCount(self):
"""
Returns the total number of table records that are not deleted.
Args:
None
Returns:
int: The total number of table records that are not deleted.
"""
try:
return len(self._db.getActiveMessageIndexes())
except:
return 0
def getColumnCount(self):
"""
Returns the total number of table columns.
Args:
None
Returns:
int: The total number of table columns.
"""
return 7
def getColumnName(self, columnIndex):
"""
Returns the name of a selected column
Args:
columnIndex (int): The index of the column.
Returns:
str: The name of the column.
"""
if columnIndex == 0:
return "ID"
elif columnIndex == 1:
return "Host"
elif columnIndex == 2:
return "Method"
elif columnIndex == 3:
return "URL"
elif columnIndex == 4:
return "Success Regex"
elif columnIndex == 5:
return "Fail Regex"
elif columnIndex == 6:
return "Success Status"
return ""
def getValueAt(self, rowIndex, columnIndex):
"""
Returns the value of a selected row and a selected column
Args:
rowIndex (int): The index of the row.
columnIndex (int): The index of the column.
Returns:
str: The value of the record
"""
messageEntry = self._db.getMessageByRow(rowIndex)
if messageEntry:
if columnIndex == 0:
return str(messageEntry.getTableRow())
elif columnIndex == 1:
return messageEntry._host
elif columnIndex == 2:
return messageEntry._method
elif columnIndex == 3:
return messageEntry._name
elif columnIndex == 4:
return messageEntry._successRegex
elif columnIndex == 5:
return messageEntry._failRegex
elif columnIndex == 6:
return messageEntry._successStatus
return ""
def addRow(self, row):
"""
Notifies all listeners that the row was inserted.
Args:
row (int): The index of the inserted row
Returns:
None
"""
self.fireTableRowsInserted(row, row)
def setValueAt(self, val, row, col):
"""
Sets the selected value in a selected row and a selected column record
and notifies the listeners for the change.
Args:
val (depends on the class type of the record): The selected value
row (int): The index of the row.
col (int): The index of the column.
Returns:
None
"""
messageEntry = self._db.getMessageByRow(row)
if col == 1:
messageEntry._host = val
elif col == 2:
messageEntry._method = val
elif col == 3:
messageEntry._name = val
elif col == 4:
messageEntry._successRegex = val
elif col == 5:
messageEntry._failRegex = val
elif col == 6:
messageEntry._successStatus = val
else:
roleIndex = self._db.getRoleByMColumn(col)._index
messageEntry.addRoleByIndex(roleIndex, val)
self.fireTableCellUpdated(row, col)
def isCellEditable(self, row, col):
"""
Checks whether the value can be edited.
Args:
row (int): The index of the selected row
col (int): The index of the selected column
Returns:
bool: A boolean value indicating whether the value is editable
"""
if col == 5:
return True
if col == 4:
return True
return False
def getColumnClass(self, columnIndex):
"""
Returns the class type of the record of a selected column.
Args:
columnIndex (int): The index of the selected column
Returns:
str: The class type of the column's records
"""
if columnIndex < 6:
return str
else:
return Boolean
class MessageTable(JTable):
"""
The table for the messages, with functions
for its constructor and redrawing.
"""
def __init__(self, extender, model):
"""
The constructor of the table
Args:
extender (burp extension): A self reference to the extension
model (abstract table class): The MessagesTableModel class
Returns:
None
"""
self._extender = extender
self.setModel(model)
return
def changeSelection(self, row, col, toggle, extend):
# show the message entry for the selected row
selectedMessage = self.getModel()._db.getMessageByRow(row)
self._extender._tabs.removeAll()
# NOTE: testing if .locked is ok here since its a manual operation
if self.getModel()._db.lock.locked():
# Provide some feedback on a click
self.redrawTable()
return
# Create original Request tab and set default tab to Request
# Then Create test tabs and set the default tab to Response for easy
# analysis
originalTab = self.createRequestTabs(selectedMessage._requestResponse)
originalTab.setSelectedIndex(0)
self._extender._tabs.addTab("Original", originalTab)
for campaignIndex in selectedMessage._campaignRuns.keys():
if not self.getModel()._db.arrayOfCampaigns[
campaignIndex].isDeleted():
tabname = str(
self.getModel()._db.arrayOfCampaigns[campaignIndex]._name)
self._extender._tabs.addTab(
tabname, self.createRequestTabs(
selectedMessage._campaignRuns[campaignIndex]))
self._extender._currentlyDisplayedItem = selectedMessage._requestResponse
JTable.changeSelection(self, row, col, toggle, extend)
return
def createRequestTabs(self, requestResponse):
requestTabs = JTabbedPane()
requestViewer = self._extender._callbacks.createMessageEditor(
self._extender, False)
responseViewer = self._extender._callbacks.createMessageEditor(
self._extender, False)
requestTabs.addTab("Request", requestViewer.getComponent())
requestTabs.addTab("Response", responseViewer.getComponent())
self._extender._callbacks.customizeUiComponent(requestTabs)
requestViewer.setMessage(requestResponse.getRequest(), True)
if requestResponse.getResponse():
responseViewer.setMessage(requestResponse.getResponse(), False)
requestTabs.setSelectedIndex(1)
return requestTabs
def redrawTable(self):
"""
This function configures the columns width.
"""
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
self.getColumnModel().getColumn(0).setMinWidth(30)
self.getColumnModel().getColumn(0).setMaxWidth(30)
self.getColumnModel().getColumn(1).setMinWidth(150)
self.getColumnModel().getColumn(1).setMaxWidth(150)
self.getColumnModel().getColumn(2).setMinWidth(60)
self.getColumnModel().getColumn(2).setMaxWidth(60)
self.getColumnModel().getColumn(3).setMinWidth(150)
self.getColumnModel().getColumn(3).setMaxWidth(150)
self.getColumnModel().getColumn(4).setMinWidth(150)
self.getColumnModel().getColumn(4).setMaxWidth(150)
self.getColumnModel().getColumn(5).setMinWidth(150)
self.getColumnModel().getColumn(5).setMaxWidth(150)
self.getColumnModel().getColumn(6).setMinWidth(100)
self.getColumnModel().getColumn(6).setMaxWidth(100)
class SuccessBooleanRenderer(JCheckBox, TableCellRenderer):
def __init__(self, db):
"""
The constructor of the renderer
Args:
db (object): The reference to the database instance.
Returns:
None
"""
self.setOpaque(True)
self.setHorizontalAlignment(JLabel.CENTER)
self._db = db
def getTableCellRendererComponent(
self,
table,
value,
isSelected,
hasFocus,
row,
column):
if value:
self.setSelected(True)
else:
self.setSelected(False)
if isSelected:
self.setForeground(table.getSelectionForeground())
self.setBackground(table.getSelectionBackground())
else:
self.setForeground(table.getForeground())
self.setBackground(table.getBackground())
return self
class MessageEntry:
"""
The schema for the row of messages table
"""
def __init__(
self,
index,
tableRow,
requestResponse,
host="",
method="",
name="",
selectedparameter="",
regex="^HTTP/1.1 200 OK",
failRegex="(^HTTP/1.1 403|block|impact)",
deleted=False,
status=True):
"""
The constructor for the MessageEntry record.
Args:
index (int): The index of the messages array for this record
tableRow (int): The index of the messages tables for this record
requestResponse (str): The saved buffer of the burp request-response
            host (str): The targeted host
method (str): The used HTTP method
name (str): The HTTP URL path
selectedparameter (str): The parameter of the HTTP request
regex (str): A regex that if matches the response indicates a success
failRegex (str): A regex that if matches the response indicates a failure
deleted (bool): A boolean value indicating whether the record is deleted.
status (bool): A boolean value indicating whether the record is selected.
Returns:
None
"""
self._index = index
self._tableRow = tableRow
self._requestResponse = requestResponse
self._host = host
self._method = method
self._name = name
self._selectedparameter = selectedparameter
self._successRegex = regex
self._failRegex = failRegex
self._successStatus = status
self._deleted = deleted
self._campaignRuns = {}
self._roleResults = {}
return
def isDeleted(self):
"""
Returns a boolean value indicating if record is deleted.
Args:
None
Returns:
            Bool: Value indicating if record is deleted.
"""
return self._deleted
def updateTableRow(self, row):
"""
        Updates the record's table row index with the given value
Args:
row (int): A new table index
Returns:
None
"""
self._tableRow = row
def getTableRow(self):
"""
Returns the current table index of the record
Args:
None
Returns:
int: Table index
"""
return self._tableRow
class SettingEntry:
"""
The schema for the row of settings table
"""
def __init__(
self,
index,
rowIndex,
name,
value="",
domain=None,
description=None,
path=None):
"""
        The constructor for the SettingEntry record.
Args:
            index (int): The index of the settings array for this record
            rowIndex (int): The index of the settings table for this record
            name (str): The name of the setting
            value (str): The value assigned to the setting
Returns:
None
"""
self._index = index
self._name = name
self._deleted = False
self._tableRow = rowIndex
self._value = value
self._val1 = value
self._val2 = value
self._val3 = value
self._val4 = value
self._val5 = value
self._val6 = value
self._val7 = value
self._val8 = value
self._path = path
self._domain = domain
self._description = description
return
def isDeleted(self):
"""
Returns a boolean value indicating if record is deleted.
Args:
None
Returns:
            Bool: Value indicating if record is deleted.
"""
return self._deleted
def updateTableRow(self, row):
"""
        Updates the record's table row index with the given value
Args:
row (int): A new table index
Returns:
None
"""
self._tableRow = row
def getTableRow(self):
"""
Returns the current table index of the record
Args:
None
Returns:
int: Table index
"""
return self._tableRow
class CampaignEntry:
"""
The schema for the row of campaigns table
"""
def __init__(
self,
index,
rowIndex,
name,
Membership="0",
MembershipB="0",
Result="Running",
            Stats="Campaign is still running..."):
self._index = index
self._name = name
self._Membership = Membership
self._MembershipB = MembershipB
self._Result = Result
self._Stats = Stats
self._deleted = False
self._tableRow = rowIndex
self.thread = None
self._inputlist = None
return
def isDeleted(self):
"""
Returns a boolean value indicating if record is deleted.
Args:
None
Returns:
            Bool: Value indicating if record is deleted.
"""
return self._deleted
def updateTableRow(self, row):
"""
        Updates the record's table row index with the given value
Args:
row (int): A new table index
Returns:
None
"""
self._tableRow = row
def getTableRow(self):
"""
Returns the current table index of the record
Args:
None
Returns:
int: Table index
"""
return self._tableRow
|
src/betamax/serializers/base.py | santosh653/betamax | 226 | 12712464 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
NOT_IMPLEMENTED_ERROR_MSG = ('This method must be implemented by classes'
' inheriting from BaseSerializer.')
class BaseSerializer(object):
"""
Base Serializer class that provides an interface for other serializers.
Usage:
.. code-block:: python
from betamax import Betamax, BaseSerializer
class MySerializer(BaseSerializer):
name = 'my'
@staticmethod
def generate_cassette_name(cassette_library_dir, cassette_name):
# Generate a string that will give the relative path of a
# cassette
def serialize(self, cassette_data):
# Take a dictionary and convert it to whatever
def deserialize(self, cassette_data):
# Uses a cassette file to return a dictionary with the
# cassette information
Betamax.register_serializer(MySerializer)
The last line is absolutely necessary.
"""
name = None
stored_as_binary = False
@staticmethod
def generate_cassette_name(cassette_library_dir, cassette_name):
raise NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)
def __init__(self):
if not self.name:
raise ValueError("Serializer's name attribute must be a string"
" value, not None.")
self.on_init()
def on_init(self):
"""Method to implement if you wish something to happen in ``__init__``.
The return value is not checked and this is called at the end of
        ``__init__``. It is meant to provide the serializer author a way to
perform things during initialization of the instance that would
otherwise require them to override ``BaseSerializer.__init__``.
"""
return None
def serialize(self, cassette_data):
"""A method that must be implemented by the Serializer author.
:param dict cassette_data: A dictionary with two keys:
``http_interactions``, ``recorded_with``.
:returns: Serialized data as a string.
"""
raise NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)
def deserialize(self, cassette_data):
"""A method that must be implemented by the Serializer author.
The return value is extremely important. If it is not empty, the
dictionary returned must have the following structure::
{
'http_interactions': [{
# Interaction
},
{
# Interaction
}],
'recorded_with': 'name of recorder'
}
        :param str cassette_data: The data serialized as a string which needs
to be deserialized.
:returns: dictionary
"""
raise NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)
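# --- Illustrative sketch (not part of betamax's base module): a minimal
# concrete serializer built only on the interface defined above. The class
# name, the 'example-json' identifier and the use of the standard-library
# json/os modules are choices made for this example, not betamax requirements.
import json as _json
import os as _os
class _ExampleJSONSerializer(BaseSerializer):
    name = 'example-json'
    @staticmethod
    def generate_cassette_name(cassette_library_dir, cassette_name):
        # e.g. '<cassette_library_dir>/<cassette_name>.example-json'
        return _os.path.join(cassette_library_dir,
                             '{0}.example-json'.format(cassette_name))
    def serialize(self, cassette_data):
        # Dictionary in, string out.
        return _json.dumps(cassette_data)
    def deserialize(self, cassette_data):
        # String in, dictionary out; fall back to {} for empty or invalid input.
        try:
            deserialized = _json.loads(cassette_data)
        except ValueError:
            deserialized = None
        return deserialized or {}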
|
google/cloud/aiplatform/training_utils/cloud_profiler/initializer.py | sakagarwal/python-aiplatform | 180 | 12712487 | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
from typing import Optional, Type
from google.cloud.aiplatform.training_utils.cloud_profiler import cloud_profiler_utils
try:
from werkzeug import serving
except ImportError as err:
raise ImportError(cloud_profiler_utils.import_error_msg) from err
from google.cloud.aiplatform.training_utils import environment_variables
from google.cloud.aiplatform.training_utils.cloud_profiler import webserver
from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import base_plugin
from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import (
tf_profiler,
)
# Mapping of available plugins to use
_AVAILABLE_PLUGINS = {"tensorflow": tf_profiler.TFProfiler}
class MissingEnvironmentVariableException(Exception):
pass
def _build_plugin(
plugin: Type[base_plugin.BasePlugin],
) -> Optional[base_plugin.BasePlugin]:
"""Builds the plugin given the object.
Args:
plugin (Type[base_plugin]):
Required. An uninitialized plugin class.
Returns:
An initialized plugin, or None if plugin cannot be
initialized.
"""
if not plugin.can_initialize():
logging.warning("Cannot initialize the plugin")
return
plugin.setup()
if not plugin.post_setup_check():
return
return plugin()
def _run_app_thread(server: webserver.WebServer, port: int):
"""Run the webserver in a separate thread.
Args:
server (webserver.WebServer):
Required. A webserver to accept requests.
port (int):
Required. The port to run the webserver on.
"""
daemon = threading.Thread(
name="profile_server",
target=serving.run_simple,
args=("0.0.0.0", port, server,),
)
daemon.setDaemon(True)
daemon.start()
def initialize(plugin: str = "tensorflow"):
"""Initializes the profiling SDK.
Args:
plugin (str):
Required. Name of the plugin to initialize.
Current options are ["tensorflow"]
Raises:
ValueError:
The plugin does not exist.
MissingEnvironmentVariableException:
An environment variable that is needed is not set.
"""
plugin_obj = _AVAILABLE_PLUGINS.get(plugin)
if not plugin_obj:
raise ValueError(
"Plugin {} not available, must choose from {}".format(
plugin, _AVAILABLE_PLUGINS.keys()
)
)
prof_plugin = _build_plugin(plugin_obj)
if prof_plugin is None:
return
server = webserver.WebServer([prof_plugin])
if not environment_variables.http_handler_port:
raise MissingEnvironmentVariableException(
"'AIP_HTTP_HANDLER_PORT' must be set."
)
port = int(environment_variables.http_handler_port)
_run_app_thread(server, port)
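# --- Illustrative usage sketch (example only): how a training script might
# start the profiler defined above. It assumes the AIP_HTTP_HANDLER_PORT
# environment variable is provided by the training environment; the guard
# keeps the example from running on import.
if __name__ == "__main__":
    try:
        initialize(plugin="tensorflow")
    except (ValueError, MissingEnvironmentVariableException) as exc:
        logging.warning("Cloud profiler was not started: %s", exc)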
|
astroNN/models/misc_models.py | igomezv/astroNN | 156 | 12712505 | <reponame>igomezv/astroNN
# ---------------------------------------------------------#
# astroNN.models.misc_models: Contain Misc. Models
# ---------------------------------------------------------#
import tensorflow.keras as tfk
from astroNN.models.base_bayesian_cnn import BayesianCNNBase
from astroNN.models.base_cnn import CNNBase
from astroNN.nn.layers import MCDropout, PolyFit
from astroNN.nn.losses import bayesian_binary_crossentropy_wrapper, bayesian_binary_crossentropy_var_wrapper
from astroNN.nn.losses import bayesian_categorical_crossentropy_wrapper, bayesian_categorical_crossentropy_var_wrapper
regularizers = tfk.regularizers
Dense = tfk.layers.Dense
Input = tfk.layers.Input
Conv2D = tfk.layers.Conv2D
Dropout = tfk.layers.Dropout
Flatten = tfk.layers.Flatten
Activation = tfk.layers.Activation
concatenate = tfk.layers.concatenate
MaxPooling2D = tfk.layers.MaxPooling2D
Model = tfk.models.Model
MaxNorm = tfk.constraints.MaxNorm
class Cifar10CNN(CNNBase):
"""
NAME:
Cifar10CNN
PURPOSE:
        To create a Convolutional Neural Network model for Cifar10 for demo purposes
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""
def __init__(self, lr=0.005):
"""
NAME:
model
PURPOSE:
To create Convolutional Neural Network model
INPUT:
OUTPUT:
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""
super().__init__()
self._implementation_version = '1.0'
self.initializer = 'he_normal'
self.activation = 'relu'
self.num_filters = [8, 16]
self.filter_len = (3, 3)
self.pool_length = (4, 4)
self.num_hidden = [256, 128]
self.max_epochs = 30
self.lr = lr
self.reduce_lr_epsilon = 0.00005
self.reduce_lr_min = 1e-8
self.reduce_lr_patience = 1
self.l2 = 1e-4
self.dropout_rate = 0.1
self.task = 'classification'
self.targetname = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
self.input_norm_mode = 255
self.labels_norm_mode = 0
def model(self):
input_tensor = Input(shape=self._input_shape['input'], name='input')
cnn_layer_1 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[0],
kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(input_tensor)
activation_1 = Activation(activation=self.activation)(cnn_layer_1)
cnn_layer_2 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[1],
kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(activation_1)
activation_2 = Activation(activation=self.activation)(cnn_layer_2)
maxpool_1 = MaxPooling2D(pool_size=self.pool_length)(activation_2)
flattener = Flatten()(maxpool_1)
dropout_1 = Dropout(self.dropout_rate)(flattener)
layer_3 = Dense(units=self.num_hidden[0], kernel_regularizer=regularizers.l2(self.l2),
kernel_initializer=self.initializer)(dropout_1)
activation_3 = Activation(activation=self.activation)(layer_3)
dropout_2 = Dropout(self.dropout_rate)(activation_3)
layer_4 = Dense(units=self.num_hidden[1], kernel_regularizer=regularizers.l2(self.l2),
kernel_initializer=self.initializer, kernel_constraint=MaxNorm(2))(dropout_2)
activation_4 = Activation(activation=self.activation)(layer_4)
layer_5 = Dense(units=self._labels_shape['output'])(activation_4)
output = Activation(activation=self._last_layer_activation, name='output')(layer_5)
model = Model(inputs=input_tensor, outputs=output)
return model
# noinspection PyCallingNonCallable
class MNIST_BCNN(BayesianCNNBase):
"""
NAME:
MNIST_BCNN
PURPOSE:
        To create a Bayesian Convolutional Neural Network model for MNIST for demo purposes
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""
def __init__(self, lr=0.005):
"""
NAME:
model
PURPOSE:
To create Convolutional Neural Network model
INPUT:
OUTPUT:
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""
super().__init__()
self._implementation_version = '1.0'
self.initializer = 'he_normal'
self.activation = 'relu'
self.num_filters = [8, 16]
self.filter_len = (3, 3)
self.pool_length = (4, 4)
self.num_hidden = [256, 128]
self.max_epochs = 30
self.lr = lr
self.reduce_lr_epsilon = 0.00005
self.reduce_lr_min = 1e-8
self.reduce_lr_patience = 1
self.l2 = 1e-4
self.dropout_rate = 0.1
self.task = 'classification'
self.targetname = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
self.input_norm_mode = 255
self.labels_norm_mode = 0
def model(self):
input_tensor = Input(shape=self._input_shape['input'], name='input')
cnn_layer_1 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[0],
kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(input_tensor)
activation_1 = Activation(activation=self.activation)(cnn_layer_1)
dropout_1 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_1)
cnn_layer_2 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[1],
kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(dropout_1)
activation_2 = Activation(activation=self.activation)(cnn_layer_2)
dropout_2 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_2)
maxpool_1 = MaxPooling2D(pool_size=self.pool_length)(dropout_2)
flattener = Flatten()(maxpool_1)
layer_3 = Dense(units=self.num_hidden[0], kernel_regularizer=regularizers.l2(self.l2),
kernel_initializer=self.initializer)(flattener)
activation_3 = Activation(activation=self.activation)(layer_3)
dropout_4 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_3)
layer_4 = Dense(units=self.num_hidden[1], kernel_regularizer=regularizers.l2(self.l2),
kernel_initializer=self.initializer, kernel_constraint=MaxNorm(2))(dropout_4)
activation_4 = Activation(activation=self.activation)(layer_4)
output = Dense(units=self._labels_shape['output'], activation='linear', name='output')(activation_4)
output_activated = Activation(self._last_layer_activation)(output)
variance_output = Dense(units=self._labels_shape['output'], activation='softplus', name='variance_output')(activation_4)
model = Model(inputs=[input_tensor], outputs=[output, variance_output])
# new astroNN high performance dropout variational inference on GPU expects single output
model_prediction = Model(inputs=[input_tensor], outputs=concatenate([output_activated, variance_output]))
if self.task == 'classification':
output_loss = bayesian_categorical_crossentropy_wrapper(variance_output)
variance_loss = bayesian_categorical_crossentropy_var_wrapper(output)
elif self.task == 'binary_classification':
output_loss = bayesian_binary_crossentropy_wrapper(variance_output)
variance_loss = bayesian_binary_crossentropy_var_wrapper(output)
else:
raise RuntimeError('Only "regression", "classification" and "binary_classification" are supported')
return model, model_prediction, output_loss, variance_loss
# noinspection PyCallingNonCallable
class SimplePolyNN(CNNBase):
"""
Class for Neural Network for Gaia Polynomial fitting
:History: 2018-Jul-23 - Written - <NAME> (University of Toronto)
"""
def __init__(self, lr=0.005, init_w=None, use_xbias=False):
super().__init__()
self._implementation_version = '1.0'
self.max_epochs = 40
self.lr = lr
self.reduce_lr_epsilon = 0.00005
self.num_hidden = 3 # equals degree of polynomial to fit
self.reduce_lr_min = 1e-8
self.reduce_lr_patience = 2
self.input_norm_mode = 0
self.labels_norm_mode = 0
self.init_w = init_w
self.use_xbias = use_xbias
self.task = 'regression'
self.targetname = ['unbiased_parallax']
def model(self):
input_tensor = Input(shape=self._input_shape, name='input')
flattener = Flatten()(input_tensor)
output = PolyFit(deg=self.num_hidden,
output_units=self._labels_shape,
use_xbias=self.use_xbias,
name='output',
init_w=self.init_w,
kernel_regularizer=regularizers.l2(self.l2))(flattener)
model = Model(inputs=input_tensor, outputs=output)
return model
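# --- Illustrative sketch (example only): constructing the demo networks
# defined above. Only instantiation is shown; training and inference follow
# the usual astroNN workflow and are omitted here so that no assumptions are
# made about the exact training API.
if __name__ == '__main__':
    demo_cnn = Cifar10CNN(lr=0.001)
    demo_bcnn = MNIST_BCNN(lr=0.001)
    demo_poly = SimplePolyNN(lr=0.001, use_xbias=False)
    print(type(demo_cnn).__name__, type(demo_bcnn).__name__, type(demo_poly).__name__)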
|
exercises/concept/restaurant-rozalynn/.meta/exemplar.py | tamireinhorn/python | 1,177 | 12712510 | def new_seating_chart(size=22):
"""Create a new seating chart.
    :param size: int - number of seats in the seating chart.
:return: dict - with number of seats specified, and placeholder values.
"""
return {number: None for number in range(1, size + 1)}
def arrange_reservations(guests=None):
"""Assign guests to seats.
    :param guests: list - list of guest names for reservations.
:return: dict - Default sized dictionary with guests assigned seats,
and placeholders for empty seats.
"""
seats = new_seating_chart()
if guests:
        for seat_number, guest in enumerate(guests, start=1):
            seats[seat_number] = guest
return seats
def find_all_available_seats(seats):
"""Find and return seat numbers that are unassigned.
:param seats: dict - seating chart.
    :return: list - list of seat numbers available for reserving.
"""
available = []
for seat_num, value in seats.items():
if value is None:
available.append(seat_num)
return available
def current_empty_seat_capacity(seats):
"""Find the number of seats that are currently empty.
:param seats: dict - dictionary of reserved seats.
:return: int - number of seats empty.
"""
count = 0
for value in seats.values():
if value is None:
count += 1
return count
def accommodate_waiting_guests(seats, guests):
"""Asses if guest can be accommodated. Update seating if they can be.
:param seats: dict - seating chart dictionary.
:param guests: list - walk-in guests
:return: dict - updated seating chart with available spaces filled.
"""
curr_empty_seats = current_empty_seat_capacity(seats)
empty_seat_list = find_all_available_seats(seats)
if len(guests) <= curr_empty_seats:
for index, _ in enumerate(guests):
seats[empty_seat_list[index]] = guests[index]
return seats
def empty_seats(seats, seat_numbers):
"""Empty listed seats of their previous reservations.
:param seats: dict - seating chart dictionary.
:param seat_numbers: list - list of seat numbers to free up or empty.
:return: updated seating chart dictionary.
"""
for seat in seat_numbers:
seats[seat] = None
return seats
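# --- Illustrative walk-through (example only; the guest names below are
# invented for the demonstration and are not part of the exercise data):
if __name__ == "__main__":
    chart = arrange_reservations(["Rozalynn", "Frank"])
    print(current_empty_seat_capacity(chart))      # remaining free seats
    chart = accommodate_waiting_guests(chart, ["Imani", "Theo"])
    chart = empty_seats(chart, [1])
    print(find_all_available_seats(chart)[:3])     # first few open seat numbers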
|
pyod/__init__.py | GBR-613/pyod | 5,126 | 12712536 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from . import models
from . import utils
# TODO: add version information here
__all__ = ['models', 'utils']
|
unfurl/parsers/parse_hash.py | jakuta-tech/unfurl | 449 | 12712575 | <reponame>jakuta-tech/unfurl
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from unfurl import utils
hash_edge = {
'color': {
'color': '#4A93AE'
},
'title': 'Hash Identification Functions',
'label': '#'
}
hash_lookup_edge = {
'color': {
'color': '#4A93AE'
},
'title': 'Hash Lookup Functions',
'label': '#'
}
def nitrxgen_md5_lookup(value):
response = requests.get(f'https://www.nitrxgen.net/md5db/{value}', verify=False).text
if response:
return response
else:
return False
def virustotal_lookup(unfurl, hash_value):
response = requests.get(f'https://www.virustotal.com/api/v3/files/{hash_value}',
headers={'x-apikey': unfurl.api_keys.get('virustotal')})
if response.status_code == 200:
try:
result = response.json()
return result['data']['attributes']
except:
return False
def decode_cisco_type_7(encoded_text):
cisco_constant = b"dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87"
try:
salt = int(encoded_text[0:2])
except ValueError:
# Valid salts should be ints; if not, move on.
return
try:
encoded = bytearray.fromhex(encoded_text[2:])
except ValueError:
# Not valid Type 7 encoded then; exit
return
plaintext = ''
for i in range(0, len(encoded)):
j = (i + salt) % 53
p = encoded[i] ^ cisco_constant[j]
plaintext += chr(p)
# If the result isn't readable as ASCII, call it a false positive and move on without adding a node.
try:
_ = plaintext.encode('ascii', errors='strict')
except UnicodeEncodeError:
return
return plaintext
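# --- Illustrative check (example only, not used by Unfurl at runtime): a
# well-known "Type 7" pair: '0822455D0A16' is the word 'cisco' encoded with a
# salt of 8, so decoding it exercises the XOR loop above. The guard keeps the
# check from running on import.
if __name__ == '__main__':
    assert decode_cisco_type_7('0822455D0A16') == 'cisco'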
def run(unfurl, node):
if node.data_type.startswith('uuid'):
return
if node.data_type.startswith('hash'):
if node.data_type == 'hash.md5' and unfurl.remote_lookups:
hash_plaintext = nitrxgen_md5_lookup(node.value)
if hash_plaintext:
unfurl.add_to_queue(
data_type=f'text', key='Plaintext',
value=hash_plaintext,
hover='Queried Nitrxgen database of MD5 hashes and found a matching plaintext value',
parent_id=node.node_id, incoming_edge_config=hash_lookup_edge)
if node.data_type in ('hash.md5', 'hash.sha-1', 'hash.sha-256') and unfurl.remote_lookups:
vt_results = virustotal_lookup(unfurl, node.value)
if vt_results:
label_text = 'Hash found on VirusTotal'
if vt_results.get("type_description"):
label_text += f'\nFile Type: {vt_results.get("type_description")};'
if vt_results.get("meaningful_name"):
label_text += f'\nName: {vt_results.get("meaningful_name")};'
if vt_results.get("reputation"):
label_text += f'\nReputation: {vt_results.get("reputation")};'
unfurl.add_to_queue(
data_type=f'text', key='Hash found on VirusTotal',
value=None, label=label_text,
hover='Queried VirusTotal with the hash value and found a match.',
parent_id=node.node_id, incoming_edge_config=hash_lookup_edge)
else:
if not isinstance(node.value, str):
return
# Filter for values that are only hex chars (A-F,0-9) and contains both a letter and number.
# This could conceivably filter out valid hashes, but will filter out many more invalid values.
if not (utils.hex_re.fullmatch(node.value) and
utils.digits_re.search(node.value) and utils.letters_re.search(node.value)):
return
# Cisco "Type 7" password encoding is very flexible, so detecting it is very false positive prone
# as it isn't a fixed length. However, decoding it is easy, so Unfurl will only "detect" something as
# using this encoding type if it can also decode it (as a method of verifying it).
# Ref: https://passlib.readthedocs.io/en/stable/lib/passlib.hash.cisco_type7.html
cisco_type_7_m = utils.cisco_7_re.fullmatch(node.value)
if cisco_type_7_m:
cisco_type_7_plaintext = decode_cisco_type_7(node.value)
if cisco_type_7_plaintext:
unfurl.add_to_queue(
data_type=f'text', key=f'Cisco "Type 7" encoding', value=cisco_type_7_plaintext,
label=f'Cisco "Type 7" encoding; plaintext is "{cisco_type_7_plaintext}"',
hover='Cisco "Type 7" password encoding is based<br> on XOR and is easily reversible '
                          '[<a href="https://passlib.readthedocs.io/en/stable/lib/passlib.hash.cisco_type7.html">'
'ref</a>].', parent_id=node.node_id, incoming_edge_config=hash_edge)
return
if len(node.value) == 32 and node.value[12] == '4':
# UUIDv4 is very common and it's the same length as an MD5 hash. This might filter out some legitimate
# MD5 hashes, but it will filter out many more UUIDs. I think the tradeoff is worth it for Unfurl.
return
hash_name, hash_hover, new_node_value = None, None, None
if len(node.value) == 32:
hash_name = 'MD5'
hash_hover = f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'
if len(node.value) == 40:
hash_name = 'SHA-1'
hash_hover = f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'
if len(node.value) == 64:
hash_name = 'SHA-256'
hash_hover = f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'
if len(node.value) == 128:
hash_name = 'SHA-512'
hash_hover = f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'
if hash_name in ('MD5', 'SHA-1', 'SHA-256'):
# Pass through the values of three common file hashes for further analysis; don't send on the
# other types to avoid duplicate processing.
new_node_value = node.value
if hash_name:
unfurl.add_to_queue(
data_type=f'hash.{hash_name.lower()}', key=f'{hash_name} Hash',
value=new_node_value, label=f'Potential {hash_name} hash',
hover=hash_hover, parent_id=node.node_id, incoming_edge_config=hash_edge)
|
tests/testUtils.py | pir2/python-omniture | 105 | 12712615 | import datetime
import unittest
import omniture
class UtilsTest(unittest.TestCase):
def setUp(self):
fakelist = [{"id":"123", "title":"abc"},{"id":"456","title":"abc"}]
self.alist = omniture.Value.list("segemnts",fakelist,{})
def tearDown(self):
del self.alist
def test_addressable_list_repr_html_(self):
"""Test the _repr_html_ for AddressableList this is used in ipython """
outlist = '<table><tr><td><b>ID</b></td><td><b>Title</b></td></tr><tr><td><b>123</b></td><td>abc</td></tr><tr><td><b>456</b></td><td>abc</td></tr></table>'
self.assertEqual(self.alist._repr_html_(),outlist,\
"The _repr_html_ isn't working: {}"\
.format(self.alist._repr_html_()))
def test_addressable_list_str_(self):
"""Test _str_ method """
outstring = 'ID 123 | Name: abc \nID 456 | Name: abc \n'
self.assertEqual(self.alist.__str__(),outstring,\
"The __str__ isn't working: {}"\
.format(self.alist.__str__()))
def test_addressable_list_get_time(self):
""" Test the custom get item raises a problem when there are duplicate names """
with self.assertRaises(KeyError):
self.alist['abc']
def test_wrap(self):
"""Test the wrap method """
self.assertIsInstance(omniture.utils.wrap("test"),list)
self.assertIsInstance(omniture.utils.wrap(["test"]),list)
self.assertEqual(omniture.utils.wrap("test"),["test"])
self.assertEqual(omniture.utils.wrap(["test"]),["test"])
def test_date(self):
"""Test the Date Method"""
test_date = "2016-09-01"
self.assertEqual(omniture.utils.date(None), None)
self.assertEqual(omniture.utils.date(test_date).strftime("%Y-%m-%d"),
test_date)
d = datetime.date(2016,9,1)
self.assertEqual(omniture.utils.date(d).strftime("%Y-%m-%d"),
test_date)
t = datetime.datetime(2016,9,1)
self.assertEqual(omniture.utils.date(t).strftime("%Y-%m-%d"),
test_date)
self.assertEqual(omniture.utils.date(u"2016-09-01").strftime("%Y-%m-%d"),
test_date)
with self.assertRaises(ValueError):
omniture.utils.date({})
def test_affix(self):
"""Test the Affix method to make sure it handles things correctly"""
p = "pre"
s = "suf"
v = "val"
con = "+"
self.assertEqual(omniture.utils.affix(p,v,connector=con),
con.join([p,v]))
self.assertEqual(omniture.utils.affix(base=v,suffix=s,connector=con),
con.join([v,s]))
self.assertEqual(omniture.utils.affix(p,v,s,connector=con),
con.join([p,v,s]))
self.assertEqual(omniture.utils.affix(base=v,connector=con),
con.join([v]))
def test_translate(self):
"""Test the translate method """
t = {"product":"cat_collar", "price":100, "location":"no where"}
m = {"product":"Product_Name","price":"Cost","date":"Date"}
s = {"Product_Name":"cat_collar", "Cost":100, "location":"no where"}
self.assertEqual(omniture.utils.translate(t,m),s)
|
tests/http_schemas/test_base_schema.py | NickMitin/pyhttptest | 142 | 12712616 | import pytest
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from pyhttptest.http_schemas.base_schema import base_schema
def test_schema_with_valid_data():
data = {
'name': 'Test',
'verb': 'GET',
'endpoint': 'users',
'host': 'http://test.com',
}
result = validate(instance=data, schema=base_schema)
assert result is None
def test_schema_with_invalid_data():
with pytest.raises(ValidationError) as exc:
# Not including a required property 'endpoint'
# from the schema into the ``dict`` below
data = {
'name': 'Test',
'verb': 'GET',
'host': 'http://test.com',
}
validate(instance=data, schema=base_schema)
assert 'required property' in str(exc.value)
|
backend/src/baserow/core/tasks.py | cjh0613/baserow | 839 | 12712646 | from .trash.tasks import (
permanently_delete_marked_trash,
mark_old_trash_for_permanent_deletion,
setup_period_trash_tasks,
)
__all__ = [
"permanently_delete_marked_trash",
"mark_old_trash_for_permanent_deletion",
"setup_period_trash_tasks",
]
|
tests/assets/projekt/projekt.py | Lufedi/reaper | 106 | 12712647 | def projekt():
# Single line comment
print('RepoReapers')
|
torchtoolbox/data/__init__.py | deeplearningforfun/torch-tools | 353 | 12712680 | <reponame>deeplearningforfun/torch-tools<filename>torchtoolbox/data/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @Author : DevinYang(<EMAIL>)
from .utils import *
from .lmdb_dataset import *
from .datasets import *
from .dataprefetcher import DataPreFetcher
from .dynamic_data_provider import *
from .sampler import *
|
assistive_gym/envs/agents/pr2.py | chstetco/assistive-gym | 216 | 12712695 | <gh_stars>100-1000
import os
import numpy as np
import pybullet as p
from .robot import Robot
class PR2(Robot):
def __init__(self, controllable_joints='right'):
right_arm_joint_indices = [42, 43, 44, 46, 47, 49, 50] # Controllable arm joints
left_arm_joint_indices = [64, 65, 66, 68, 69, 71, 72] # Controllable arm joints
wheel_joint_indices = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] # Controllable wheel joints
right_end_effector = 54 # Used to get the pose of the end effector
left_end_effector = 76 # Used to get the pose of the end effector
right_gripper_indices = [57, 58, 59, 60] # Gripper actuated joints
left_gripper_indices = [79, 80, 81, 82] # Gripper actuated joints
right_tool_joint = 54 # Joint that tools are attached to
left_tool_joint = 76 # Joint that tools are attached to
right_gripper_collision_indices = list(range(49, 64)) # Used to disable collision between gripper and tools
left_gripper_collision_indices = list(range(71, 86)) # Used to disable collision between gripper and tools
gripper_pos = {'scratch_itch': [0.25]*4, # Gripper open position for holding tools
'feeding': [0.03]*4,
'drinking': [0.45]*4,
'bed_bathing': [0.2]*4,
'dressing': [0]*4,
'arm_manipulation': [0.15]*4}
tool_pos_offset = {'scratch_itch': [0, 0, 0], # Position offset between tool and robot tool joint
'feeding': [0, -0.03, -0.11],
'drinking': [-0.01, 0, -0.05],
'bed_bathing': [0, 0, 0],
'arm_manipulation': [0.125, 0, -0.075]}
tool_orient_offset = {'scratch_itch': [0, 0, 0], # RPY orientation offset between tool and robot tool joint
'feeding': [-0.2, 0, 0],
'drinking': [np.pi/2.0, 0, 0],
'bed_bathing': [0, 0, 0],
'arm_manipulation': [np.pi/2.0, 0, 0]}
toc_base_pos_offset = {'scratch_itch': [0.1, 0, 0], # Robot base offset before TOC base pose optimization
'feeding': [0.1, 0.2, 0],
'drinking': [0.2, 0.2, 0],
'bed_bathing': [-0.1, 0, 0],
'dressing': [1.7, 0.7, 0],
'arm_manipulation': [-0.3, 0.7, 0]}
toc_ee_orient_rpy = {'scratch_itch': [0, 0, 0], # Initial end effector orientation
'feeding': [np.pi/2.0, 0, 0],
'drinking': [0, 0, 0],
'bed_bathing': [0, 0, 0],
'dressing': [[0, 0, np.pi], [0, 0, np.pi*3/2.0]],
'arm_manipulation': [0, 0, 0]}
wheelchair_mounted = False
super(PR2, self).__init__(controllable_joints, right_arm_joint_indices, left_arm_joint_indices, wheel_joint_indices, right_end_effector, left_end_effector, right_gripper_indices, left_gripper_indices, gripper_pos, right_tool_joint, left_tool_joint, tool_pos_offset, tool_orient_offset, right_gripper_collision_indices, left_gripper_collision_indices, toc_base_pos_offset, toc_ee_orient_rpy, wheelchair_mounted, half_range=False)
def init(self, directory, id, np_random, fixed_base=True):
self.body = p.loadURDF(os.path.join(directory, 'PR2', 'pr2_no_torso_lift_tall.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0], flags=p.URDF_USE_INERTIA_FROM_FILE, physicsClientId=id)
super(PR2, self).init(self.body, id, np_random)
# Recolor robot
for i in [19, 42, 64]:
p.changeVisualShape(self.body, i, rgbaColor=[1.0, 1.0, 1.0, 1.0], physicsClientId=id)
for i in [43, 46, 49, 58, 60, 65, 68, 71, 80, 82]:
p.changeVisualShape(self.body, i, rgbaColor=[0.4, 0.4, 0.4, 1.0], physicsClientId=id)
for i in [45, 51, 67, 73]:
p.changeVisualShape(self.body, i, rgbaColor=[0.7, 0.7, 0.7, 1.0], physicsClientId=id)
p.changeVisualShape(self.body, 20, rgbaColor=[0.8, 0.8, 0.8, 1.0], physicsClientId=id)
p.changeVisualShape(self.body, 40, rgbaColor=[0.6, 0.6, 0.6, 1.0], physicsClientId=id)
def reset_joints(self):
super(PR2, self).reset_joints()
# Position end effectors whith dual arm robots
self.set_joint_angles(self.right_arm_joint_indices, [-1.75, 1.25, -1.5, -0.5, -1, 0, -1])
self.set_joint_angles(self.left_arm_joint_indices, [1.75, 1.25, 1.5, -0.5, 1, 0, 1])
|
cacreader/swig-4.0.2/Examples/test-suite/python/director_default_runme.py | kyletanyag/LL-Smartcard | 1,031 | 12712775 | <gh_stars>1000+
from director_default import *
f = Foo()
f = Foo(1)
f = Bar()
f = Bar(1)
|
gobbli/test/test_util.py | RTIInternational/gobbli | 276 | 12712829 | <filename>gobbli/test/test_util.py
import gzip
import io
import tarfile
import zipfile
from pathlib import Path
from typing import List
import pytest
from gobbli.util import (
TokenizeMethod,
blob_to_dir,
detokenize,
dir_to_blob,
extract_archive,
is_archive,
shuffle_together,
tokenize,
)
def make_zip(tmpdir: Path, relative_paths: List[Path]) -> Path:
"""
Make a zip archive from a list of relative paths.
Create empty files at each path and add them to the archive.
"""
zip_path = tmpdir / "test.zip"
with zipfile.ZipFile(zip_path, "w") as z:
for relative_path in relative_paths:
full_path = tmpdir / relative_path
full_path.parent.mkdir(exist_ok=True, parents=True)
full_path.touch()
z.write(full_path, arcname=relative_path)
return zip_path
def make_tar_gz(tmpdir: Path, relative_paths: List[Path]) -> Path:
"""
Make a .tar.gz archive from a list of relative paths.
Create empty files at each path and add them to the archive.
"""
tar_path = tmpdir / "test.tar.gz"
with tarfile.open(tar_path, "w:gz") as z:
for relative_path in relative_paths:
full_path = tmpdir / relative_path
full_path.parent.mkdir(exist_ok=True, parents=True)
full_path.touch()
z.add(str(full_path), arcname=str(relative_path), recursive=False)
return tar_path
def make_gz(tmpdir: Path, name: str) -> Path:
"""
Create a gzip-compressed file with the given name under the given temp directory.
Return the path to the compressed file.
"""
gzip_path = tmpdir / f"{name}.gz"
with gzip.open(gzip_path, "wb") as z:
z.write(b"Test")
return gzip_path
TEST_ARCHIVE_DATA = ["./a", "./b/c"]
@pytest.mark.parametrize(
"archive_func,junk,expected_paths",
[
(make_zip, False, [Path("a"), Path("b") / "c"]),
(make_zip, True, [Path("a"), Path("c")]),
(make_tar_gz, False, [Path("a"), Path("b") / "c"]),
(make_tar_gz, True, [Path("a"), Path("c")]),
],
)
def test_extract_archive(tmpdir, archive_func, junk, expected_paths):
tmpdir_path = Path(tmpdir)
archive_path = archive_func(tmpdir_path, TEST_ARCHIVE_DATA)
archive_extract_dir = tmpdir_path / "extract"
extract_archive(archive_path, archive_extract_dir, junk_paths=junk)
for relative_path in expected_paths:
assert (archive_extract_dir / relative_path).exists()
def test_extract_gz(tmpdir):
tmpdir_path = Path(tmpdir)
filename = "test.txt"
archive_path = make_gz(tmpdir_path, "test.txt")
archive_extract_dir = tmpdir_path / "extract"
extract_archive(archive_path, archive_extract_dir)
assert (archive_extract_dir / filename).exists()
@pytest.mark.parametrize(
"name,expected_is_archive",
[
("test.tar.gz", True),
("test.gz", True),
("test.txt.gz", True),
("test.zip", True),
("test.xz", False),
("test.txt", False),
("test.vec", False),
("test.bin", False),
],
)
def test_is_archive(name, expected_is_archive):
assert is_archive(Path(name)) == expected_is_archive
def test_dir_to_blob(tmpdir):
test_dir = Path(tmpdir) / "test"
test_dir.mkdir()
test_file_name = "test.txt"
test_file = test_dir / test_file_name
file_contents = "test"
test_file.write_text(file_contents)
blob = dir_to_blob(test_dir)
fileobj = io.BytesIO(blob)
fileobj.seek(0)
extract_path = test_dir / "test2"
with tarfile.open(fileobj=fileobj, mode="r:gz") as archive:
archive.extractall(extract_path)
extracted_file = extract_path / test_file_name
assert extracted_file.exists()
assert extracted_file.read_text() == file_contents
def test_blob_to_dir(tmpdir):
test_dir = Path(tmpdir) / "test"
test_dir.mkdir()
test_file_name = "test.txt"
test_file = test_dir / test_file_name
file_contents = "test"
test_file.write_text(file_contents)
blob = dir_to_blob(test_dir)
extract_path = test_dir / "test2"
blob_to_dir(blob, extract_path)
extracted_file = extract_path / test_file_name
assert extracted_file.exists()
assert extracted_file.read_text() == file_contents
@pytest.mark.parametrize(
"l1,l2,err",
[
([], [], None),
(["a"], [1], None),
(["a", "b"], [1], ValueError),
(["a", "b"], [1, 2], None),
(["a", "b", "c"], [1, 2, 3], None),
(["a", "b", "c", "d"], [1, 2, 3, 4], None),
],
)
def test_shuffle_together(l1, l2, err):
seed = 1
if err is not None:
with pytest.raises(err):
shuffle_together(l1, l2, seed=seed)
else:
original_rows = set(zip(l1, l2))
shuffle_together(l1, l2, seed=seed)
for row in zip(l1, l2):
assert tuple(row) in original_rows
@pytest.mark.parametrize(
"text,tokens",
[
("This is a test.", ["this", "is", "a", "test."]),
("Two spaces", ["two", "spaces"]),
("Hyphenated-word", ["hyphenated-word"]),
("Numbers 1 and 2", ["numbers", "1", "and", "2"]),
],
)
def test_tokenize_split(text, tokens):
# Whitespace tokenization just splits on whitespace
assert tokenize(TokenizeMethod.SPLIT, [text]) == [tokens]
@pytest.mark.parametrize(
"text,tokens",
[
("This is a test.", ["this", "is", "a", "test"]),
("Two spaces", ["two", "spaces"]),
("Hyphenated-word", ["hyphenated", "word"]),
("Numbers 1 and 2", ["numbers", "and"]),
],
)
def test_tokenize_spacy(text, tokens):
# Spacy tokenization lowercases and removes non-alphabetic tokens
assert tokenize(TokenizeMethod.SPACY, [text]) == [tokens]
@pytest.mark.parametrize(
"tokenize_method", [TokenizeMethod.SPLIT, TokenizeMethod.SPACY]
)
@pytest.mark.parametrize(
"tokens,text",
[
(["this", "is", "a", "test"], "this is a test"),
(["hyphenated-word"], "hyphenated-word"),
(["try", ",", "punctuation", "."], "try , punctuation ."),
],
)
def test_detokenize_split_spacy(text, tokens, tokenize_method):
assert detokenize(tokenize_method, [tokens]) == [text]
@pytest.mark.parametrize("model_path", [Path("spm"), None])
def test_tokenize_detokenize_sentencepiece(tmpdir, model_path):
texts = ["a b c", "a ab c", "a b ac"]
# Model should be trained
if model_path is not None:
model_path = Path(tmpdir) / model_path
tokens = tokenize(
TokenizeMethod.SENTENCEPIECE, texts, model_path=model_path, vocab_size=7
)
# Control sequence indicating whitespace
_ = "▁"
expected_tokens = [
[_, "a", _, "b", _, "c"],
[_, "a", _, "a", "b", _, "c"],
[_, "a", _, "b", _, "a", "c"],
]
assert tokens == expected_tokens
# Can't detokenize if we didn't give a persistent model path to the tokenize
# function
if model_path is not None:
assert detokenize(TokenizeMethod.SENTENCEPIECE, tokens, model_path) == texts
# Previously should be reused with the old vocab size, and a new model
# shouldn't be trained
tokens = tokenize(TokenizeMethod.SENTENCEPIECE, texts, model_path=model_path)
assert tokens == expected_tokens
|
applications/pytorch/miniDALL-E/log.py | payoto/graphcore_examples | 260 | 12712887 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import logging
import sys
from logging import handlers
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Logger(metaclass=Singleton):
    # Predefined log levels include, from highest to lowest severity:
# CRITICAL, ERROR, WARNING, INFO, DEBUG
def __init__(self, filename=None, level='INFO', when='D', backCount=3,
fmt='[%(asctime)s] %(message)s'):
assert filename is not None
self.filename = filename
self.logger = logging.getLogger(filename)
format_str = logging.Formatter(fmt)
self.logger.setLevel(logging.getLevelName(level))
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(format_str)
th = handlers.TimedRotatingFileHandler(filename=filename, when=when,
backupCount=backCount, encoding='utf-8')
th.setFormatter(format_str)
self.logger.addHandler(sh)
self.logger.addHandler(th)
if __name__ == '__main__':
log = Logger('all.log', level='ERROR')
log.logger.debug('debug')
log.logger.info('info')
log.logger.warning('warning')
log.logger.error('error')
log.logger.critical('critical')
|
up/tasks/det/plugins/condinst/models/postprocess/condinst_predictor.py | ModelTC/EOD | 196 | 12712897 | <reponame>ModelTC/EOD
import torch
from torch.nn import functional as F
from up.utils.general.registry_factory import MASK_PREDICTOR_REGISTRY
from up.utils.general.fp16_helper import to_float32
from up.tasks.det.plugins.condinst.models.head.condinst_head import aligned_bilinear
@MASK_PREDICTOR_REGISTRY.register('condinst')
class MaskPredictorCondinst(object):
def __init__(self,):
pass
@torch.no_grad()
@to_float32
def predict(self, mask_head, input, locations, controller, mask_gen_params):
mask_feats = input['mask_feats']
image_info = input['image_info']
image = input['image']
bboxes = input['dt_bboxes']
mask_head_params, fpn_levels, instance_locations, im_inds, pred_boxes = self.get_pred_instances(
input, controller, mask_gen_params)
mask_logits = mask_head.mask_heads_forward_with_coords(
mask_feats, locations, mask_head_params, fpn_levels, instance_locations, im_inds)
pred_global_masks = mask_logits.sigmoid()
dt_bboxes = []
dt_masks = []
for im_id, (image_size,) in enumerate(zip(image_info)):
ind_per_im = torch.nonzero(im_inds == im_id)[:, 0]
pred_masks, ind_per_im_keep = self.postprocess(
image, ind_per_im, image_size, pred_boxes, pred_global_masks
)
dt_bboxes.append(bboxes[ind_per_im_keep])
for idx in range(len(ind_per_im_keep)):
dt_masks.append(pred_masks[idx].detach().cpu().numpy())
dt_bboxes = torch.cat(dt_bboxes, dim=0)
return {'dt_masks': dt_masks, 'dt_bboxes': dt_bboxes}
def get_pred_instances(self, input, controller, mask_gen_params):
B = controller[0].shape[0]
K = sum([x.shape[1] for x in controller])
bboxes = input['dt_bboxes']
pos_inds = input['pos_inds']
im_inds, cls_rois, scores, cls = torch.split(bboxes, [1, 4, 1, 1], dim=1)
im_inds = im_inds.squeeze().type(torch.LongTensor).to(pos_inds.device)
pos_inds = pos_inds.squeeze().add(im_inds * K).type(torch.LongTensor)
mask_head_params = torch.cat(controller, dim=1).reshape(-1, mask_gen_params)[pos_inds]
mlvl_locations = input['mlvl_locations']
instance_locations = torch.cat(mlvl_locations).repeat(B, 1)[pos_inds]
fpn_levels = torch.cat([mlvl_locations[lvl_num].new_ones(len(mlvl_locations[lvl_num]),
dtype=torch.long) * lvl_num for lvl_num in range(len(mlvl_locations))])
fpn_levels = fpn_levels.repeat(B)[pos_inds].type(torch.LongTensor)
return mask_head_params, fpn_levels, instance_locations, im_inds, cls_rois
def postprocess(self, image, ind_per_im, image_size, pred_boxes, pred_global_masks=None, mask_threshold=0.5):
padded_im_h, padded_im_w = (image.shape[-2], image.shape[-1])
resized_im_h, resized_im_w = (image_size[0], image_size[1])
output_height, output_width = (image_size[3], image_size[4])
scale_x, scale_y = (output_width / resized_im_w, output_height / resized_im_h)
output_boxes = pred_boxes[ind_per_im]
output_boxes[:, 0::2] *= scale_x
output_boxes[:, 1::2] *= scale_y
output_boxes[:, 0] = torch.clamp(output_boxes[:, 0], min=0, max=output_width)
output_boxes[:, 1] = torch.clamp(output_boxes[:, 1], min=0, max=output_height)
output_boxes[:, 2] = torch.clamp(output_boxes[:, 2], min=0, max=output_width)
output_boxes[:, 3] = torch.clamp(output_boxes[:, 3], min=0, max=output_height)
keep_inds = ((output_boxes[:, 2] - output_boxes[:, 0])
> 0.0) & ((output_boxes[:, 3] - output_boxes[:, 1]) > 0.0)
ind_per_im = ind_per_im[keep_inds]
if pred_global_masks is not None:
pred_global_masks = pred_global_masks[ind_per_im]
mask_h, mask_w = pred_global_masks.size()[-2:]
factor_h = padded_im_h // mask_h
factor_w = padded_im_w // mask_w
assert factor_h == factor_w
factor = factor_h
pred_global_masks = aligned_bilinear(
pred_global_masks, factor
)
pred_global_masks = pred_global_masks[:, :, :resized_im_h, :resized_im_w]
pred_global_masks = F.interpolate(
pred_global_masks,
size=(output_height, output_width),
mode="bilinear", align_corners=False
)
pred_global_masks = pred_global_masks[:, 0, :, :]
pred_masks = (pred_global_masks > mask_threshold).float()
return pred_masks, ind_per_im
def build_mask_predictor(predictor_cfg):
return MASK_PREDICTOR_REGISTRY.build(predictor_cfg)
|
PyPtt/screens.py | Truth0906/PTTLibrary | 260 | 12712898 | import re
import sys
try:
from . import lib_util
from . import log
except ModuleNotFoundError:
import lib_util
import log
class Target(object):
MainMenu = [
'離開,再見…',
'人, 我是',
'[呼叫器]',
]
MainMenu_Exiting = [
'【主功能表】',
'您確定要離開',
]
QueryPost = [
'請按任意鍵繼續',
'───────┘',
]
InBoard = [
'看板資訊/設定',
'文章選讀',
'相關主題'
]
InBoardWithCursor = [
'【',
'看板資訊/設定',
]
# (h)說明 (←/q)離開
# (y)回應(X%)推文(h)說明(←)離開
# (y)回應(X/%)推文 (←)離開
InPost = [
'瀏覽',
'頁',
')離開'
]
PostEnd = [
'瀏覽',
'頁 (100%)',
')離開'
]
InWaterBallList = [
'瀏覽',
'頁',
'說明',
]
WaterBallListEnd = [
'瀏覽',
'頁 (100%)',
'說明'
]
PostIP_New = [
'※ 發信站: 批踢踢實業坊(ptt.cc), 來自:'
]
PostIP_Old = [
'◆ From:'
]
Edit = [
'※ 編輯'
]
PostURL = [
'※ 文章網址'
]
Vote_Type1 = [
'◆ 投票名稱',
'◆ 投票中止於',
'◆ 票選題目描述'
]
Vote_Type2 = [
'投票名稱',
'◆ 預知投票紀事',
]
AnyKey = '任意鍵'
InTalk = [
'【聊天說話】',
'線上使用者列表',
'查詢網友',
'顯示上幾次熱訊'
]
InUserList = [
'休閒聊天',
'聊天/寫信',
'說明',
]
InMailBox = [
'【郵件選單】',
'鴻雁往返'
]
InMailMenu = [
'【電子郵件】',
'我的信箱',
'把所有私人資料打包回去',
'寄信給帳號站長',
]
PostNoContent = [
'◆ 此文章無內容',
AnyKey
]
InBoardList = [
'【看板列表】',
'選擇看板',
'只列最愛',
'已讀/未讀'
]
UseTooManyResources = [
'程式耗用過多計算資源'
]
Animation = [
'★ 這份文件是可播放的文字動畫,要開始播放嗎?'
]
CursorToGoodbye = MainMenu.copy()
def show(config, screen_queue, function_name=None):
if config.log_level != log.level.TRACE:
return
if isinstance(screen_queue, list):
for Screen in screen_queue:
print('-' * 50)
try:
print(
Screen.encode(
sys.stdin.encoding, "replace").decode(
sys.stdin.encoding))
except Exception:
print(Screen.encode('utf-8', "replace").decode('utf-8'))
else:
print('-' * 50)
try:
print(screen_queue.encode(
sys.stdin.encoding, "replace").decode(
sys.stdin.encoding))
except Exception:
print(screen_queue.encode('utf-8', "replace").decode('utf-8'))
print('len:' + str(len(screen_queue)))
if function_name is not None:
print('錯誤在 ' + function_name + ' 函式發生')
print('-' * 50)
displayed = False
def vt100(ori_screen: str, no_color: bool = True) -> str:
result = ori_screen
if no_color:
result = re.sub('\x1B\[[\d+;]*m', '', result)
result = re.sub(r'[\x1B]', '=PTT=', result)
# global displayed
# if not displayed:
# display = ('★' in result)
# if display:
# displayed = True
# else:
# display = False
#
# if display:
# print('=1=' * 10)
# print(result)
# print('=2=' * 10)
# result = '\n'.join(
# [x.rstrip() for x in result.split('\n')]
# )
    # Editing a post can sometimes trigger an unexplained screen clear; needs further attention
# if result.endswith('=PTT=[H'):
# print('!!!!!!!!=PTT=[H=PTT=[H=PTT=!!!!!!!!!!!!!!!')
while '=PTT=[H' in result:
if result.count('=PTT=[H') == 1 and result.endswith('=PTT=[H'):
break
result = result[result.find('=PTT=[H') + len('=PTT=[H'):]
while '=PTT=[2J' in result:
result = result[result.find('=PTT=[2J') + len('=PTT=[2J'):]
pattern_result = re.compile('=PTT=\[(\d+);(\d+)H$').search(result)
last_position = None
if pattern_result is not None:
# print(f'Before [{pattern_result.group(0)}]')
last_position = pattern_result.group(0)
    # When entering PTT, the category board list is sometimes sent along and the main menu screen
    # is then drawn directly on top of it without a [H or [2J, which throws off the later line counts
if '=PTT=[1;3H主功能表' in result:
result = result[result.find('=PTT=[1;3H主功能表') + len('=PTT=[1;3H主功能表'):]
# if '=PTT=[1;' in result:
# if last_position is None:
# result = result[result.rfind('=PTT=[1;'):]
# elif not last_position.startswith('=PTT=[1;'):
# result = result[result.rfind('=PTT=[1;'):]
# print('-'*50)
# print(result)
result_list = re.findall('=PTT=\[(\d+);(\d+)H', result)
for (line_count, space_count) in result_list:
line_count = int(line_count)
space_count = int(space_count)
current_line = result[
:result.find(
f'[{line_count};{space_count}H'
)].count('\n') + 1
# if display:
# print(f'>{line_count}={space_count}<')
# print(f'>{current_line}<')
if current_line > line_count:
# if LastPosition is None:
# pass
# elif LastPosition != f'=PTT=[{line_count};{space_count}H':
# print(f'current_line [{current_line}]')
# print(f'line_count [{line_count}]')
# print('Clear !!!')
# print(f'!!!!!!!!=PTT=[{line_count};{space_count}H')
result_lines = result.split('\n')
target_line = result_lines[line_count - 1]
if f'=PTT=[{line_count};{space_count}H=PTT=[K' in result:
                # If there is a K, erase everything on that line after the given column
target_line = target_line[:space_count - 1]
# OriginIndex = -1
origin_line = None
# for i, line in enumerate(result_lines):
for line in result_lines:
if f'=PTT=[{line_count};{space_count}H=PTT=[K' in line:
# OriginIndex = i
origin_line = line
break
if origin_line.count('=PTT=') > 2:
origin_line = origin_line[
:lib_util.findnth(origin_line, '=PTT=', 3)
]
# result_lines[OriginIndex] = result_lines[OriginIndex].replace(
# origin_line,
# ''
# )
origin_line = origin_line[
len(f'=PTT=[{line_count};{space_count}H=PTT=[K'):
]
# log.showValue(
# log.level.INFO,
# 'origin_line',
# origin_line
# )
new_target_line = f'{target_line}{origin_line}'
result_lines[line_count - 1] = new_target_line
result = '\n'.join(result_lines)
elif current_line == line_count:
# print(f'!!!!!=PTT=[{line_count};{space_count}H')
current_space = result[
:result.find(
f'=PTT=[{line_count};{space_count}H'
)]
current_space = current_space[
current_space.rfind('\n') + 1:
]
# if display:
# print(f'>>{current_space}<<')
# print(f'ori length>>{len(current_space)}<<')
# newversion_length = len(current_space.encode('big5uao', 'ignore'))
# print(f'newversion_length >>{newversion_length}<<')
# current_space = len(current_space.encode('big5', 'replace'))
current_space = len(current_space)
# if display:
# print(f'!!!!!{current_space}')
if current_space > space_count:
# if display:
# print('1')
result = result.replace(
f'=PTT=[{line_count};{space_count}H',
(line_count - current_line) * '\n' + space_count * ' '
)
else:
# if display:
# print('2')
result = result.replace(
f'=PTT=[{line_count};{space_count}H',
(line_count - current_line) * '\n' + (space_count - current_space) * ' '
)
else:
result = result.replace(
f'=PTT=[{line_count};{space_count}H',
(line_count - current_line) * '\n' + space_count * ' '
)
# while '=PTT=[K' in result:
# Target = result[result.find('=PTT=[K'):]
# print(f'Target[{Target}]')
# index1 = Target.find('\n')
# index2 = Target.find('=PTT=')
# if index2 == 0:
# index = index1
# else:
# index = min(index1, index2)
# break
# Target = Target[:index]
# print('===' * 20)
# print(result)
# print('-=-' * 20)
# print(Target)
# print('===' * 20)
# result = result.replace(Target, '')
# print(Target)
# print('===' * 20)
if last_position is not None:
result = result.replace(last_position, '')
# if display:
# print('-Final-' * 10)
# print(result)
# print('-Final-' * 10)
return result
|
docs/generate.py | tenjupaul/pocketlang | 1,323 | 12712902 | <filename>docs/generate.py<gh_stars>1000+
#!python
## Copyright (c) 2021 <NAME>
## Licensed under: MIT License
from markdown import markdown
from os.path import join
import os, sys, shutil, re
## TODO: This is a quick and dirty script to generate html
## from markdown. Refactor this file in the future.
## Usage:
## to generate pages : python generate.py
## to clean pages : python generate.py (-c, --clean)
TEMPLATE_PATH = 'static/template.html'
ROOT_URL = 'https://thakeenathees.github.io/pocketlang/'
## Home page should be in the SOURCE_DIR.
HOME_PAGE = 'home.md'
TRY_PAGE = 'try-it-now.html'
SOURCE_DIR = 'pages/'
TARGET_DIR = 'build/'
STATIC_DIR = 'static/'
## Additional source files of wasm try online page.
WASM_SOURCE_FILES = '''\
<script type="text/javascript" src="{{ STATIC_DIR }}codejar/codejar.js"></script>
<script type="text/javascript" src="{{ STATIC_DIR }}codejar/linenumbers.js"></script>
<link rel="stylesheet" type="text/css" href="{{ STATIC_DIR }}codejar/style.css" />
<script type="text/javascript" src="{{ STATIC_DIR }}prism/prism.js"></script>
<link rel="stylesheet" type="text/css" href="{{ STATIC_DIR }}prism/prism.css" />
<script type="text/javascript" src="{{ STATIC_DIR }}try_now.js"></script>
'''
## Navigation pages in order. Should match the path names.
## Any file/folder name shouldn't contain white space.
PAGES = [
('Getting-Started', [
TRY_PAGE,
'learn-in-15-minutes.md',
'build-from-source.md',
'contributing.md',
]),
('Language-API', [
'variables.md',
'functions.md',
'fibers.md',
'modules.md',
]),
]
def new_context():
return {
'{{ TITLE }}' : '',
'{{ NAVIGATION }}' : '',
'{{ CONTENT }}' : '',
'{{ HOME_URL }}' : '',
'{{ STATIC_DIR }}' : '',
}
def main():
## Remove generated files and create empty target dir with static files.
if os.path.exists(TARGET_DIR):
remove_ignore = ( '.git', )
for _dir in os.listdir(TARGET_DIR):
if _dir in remove_ignore: continue
if os.path.isdir(join(TARGET_DIR,_dir)):
shutil.rmtree(join(TARGET_DIR, _dir))
else:
os.remove(join(TARGET_DIR, _dir))
shutil.copytree(STATIC_DIR, join(TARGET_DIR, STATIC_DIR))
open(join(TARGET_DIR, '.nojekyll'), 'w').close()
## Initialize the template and navigation.
template = ''
navigation = generate_navigation()
with open(TEMPLATE_PATH, 'r') as f:
template = f.read()
## Generate the home page.
index_html = join(TARGET_DIR, 'index.html')
ctx = generate_page_context(join(SOURCE_DIR, HOME_PAGE),
index_html, navigation)
write_page(ctx, template, index_html)
for entry in PAGES: ## entry = ('dirname', [files...])
_dir = entry[0]
for file in entry[1]:
ext = get_validated_ext(file)
path = join(SOURCE_DIR, _dir, file)
dst = ''; path_prefix = _dir.lower().replace(' ', '-') + '-'
if ext == '.md':
dst = join(TARGET_DIR, path_prefix + file.replace('.md', '.html'))
else:
dst = join(TARGET_DIR, path_prefix + file)
ctx = generate_page_context(path, dst, navigation)
_template = template
if file == TRY_PAGE:
_template = template.replace('{{ WASM_SOURCE_FILES }}',
WASM_SOURCE_FILES)
write_page(ctx, _template, dst)
pass
def generate_navigation():
navigation = ''
for entry in PAGES:
_dir = entry[0]
title = _dir.replace('-', ' ').title()
navigation += '<div class="navigation">\n'
navigation += '<h3><strong>%s</strong></h3>\n' % (title)
navigation += '<ul class="menu">\n'
for file in entry[1]:
ext = get_validated_ext(file)
link = '' ## Assuming that file name don't contain '.md' at the middle.
path_prefix = _dir.lower().replace(' ', '-') + '-'
if ext == '.md':
link = join(ROOT_URL, path_prefix + file.replace('.md', '.html'))
else:
link = join(ROOT_URL, path_prefix + file)
link = link.replace('\\', '/')
title = file.replace(ext, '').replace('-', ' ').title()
navigation += '<li><a href="%s">%s</a></li>\n' % (link, title)
navigation += '</ul>\n'
navigation += '</div>\n'
return navigation
def generate_page_context(src, dst, navigation):
title = path_to_title(src)
content = path_to_content(src)
ctx = new_context()
ctx[ '{{ TITLE }}' ] = title
ctx[ '{{ NAVIGATION }}' ] = navigation
ctx[ '{{ CONTENT }}' ] = content
ctx[ '{{ HOME_URL }}' ] = ROOT_URL + 'index.html'
ctx[ '{{ STATIC_DIR }}' ] = STATIC_DIR
return ctx;
def get_validated_ext(path):
ext = ''
if path.endswith('.md'): ext = '.md'
elif path.endswith('.html'): ext = '.html'
else: raise Exception('Expected .md / .html file.')
return ext
## Get the title from the src path.
def path_to_title(path):
ext = get_validated_ext(path)
title = os.path.basename(path).replace(ext, '').title()
title += ' - PocketLang'
return title
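# Illustrative example (not used by the generator itself): with the page
# layout above, path_to_title('pages/Getting-Started/build-from-source.md')
# evaluates to 'Build-From-Source - PocketLang'. The guard keeps this check
# from interfering when the module is imported.
if __name__ == '__main__':
    assert path_to_title(
        'pages/Getting-Started/build-from-source.md') == 'Build-From-Source - PocketLang'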
## Generate html content from the markdown source path.
## If the path is an .html file return it's content.
def path_to_content(src):
text = ''
with open(src, 'r') as f:
text = f.read()
## If html file we're done.
if get_validated_ext(src) == '.html':
return text
assert(src.endswith('.md'))
text = custom_md_override(text)
content = markdown(text, extensions=['codehilite', 'fenced_code'])
    ## A hacky way to inject html overrides to highlight our language.
    ## I'm not focusing on generating the pages and this is a hacky way to
    ## do so. This should be done with a good static page generator instead
    ## of this script.
return custom_html_override(src, content)
## Inject our custom markdown text override.
def custom_md_override(text):
## Add html anchor.
for pre in ('#', '##', '###'):
pattern = '(^' + pre + r' \s*%%(.*)%%\n)'
for match, title in re.findall(pattern, text, flags=re.MULTILINE):
link = title.strip().lower().replace(' ', '-')
text = text.replace(match,
f'{pre} {title} <a href="#{link}" name="{link}" class="anchor">#</a>')
return text
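## Illustrative sketch of the rewrite above (not part of the original script;
## the heading text is an invented example): a markdown heading written as
##   ## %%Getting Started%%
## comes out of custom_md_override() as
##   ## Getting Started <a href="#getting-started" name="getting-started" class="anchor">#</a>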
## Inject our custom html overrides.
def custom_html_override(src, content):
## FIXME: I should create a pygment lexer.
  ## A dirty way to inject our keywords (into Ruby's lexer).
addnl_keywords = [
'null', 'from', 'import', 'as', 'func', 'native', 'continue'
]
not_keyword = [
'alias', 'begin', 'case', 'next', 'nil', 'redo', 'rescue',
'retry', 'ensure', 'undef', 'unless', 'super', 'until', 'when',
'defined',
]
for kw in addnl_keywords:
content = content.replace('<span class="n">%s</span>' % kw,
'<span class="k">%s</span>' % kw)
for nk in not_keyword:
content = content.replace('<span class="k">%s</span>' % nk,
'<span class="n">%s</span>' % nk)
  ## codehilite marks the compilation command as an error.
content = content.replace('<span class="err">', '<span>')
return content
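## For example (illustrative only), the loops above turn pygments output such as
## '<span class="n">null</span>' into '<span class="k">null</span>' so that
## PocketLang-only keywords get keyword highlighting, and the final replace turns
## '<span class="err">' spans (mis-flagged shell commands) back into plain '<span>'.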
def write_page(ctx, template, dst):
_dir = os.path.dirname(dst)
if _dir not in ('.', './', '') and not os.path.exists(_dir):
os.makedirs(os.path.dirname(dst))
page = template
for key, value in ctx.items():
page = page.replace(key, value)
page = page.replace('{{ WASM_SOURCE_FILES }}', '')
with open(dst, 'w') as f:
f.write(page)
if __name__ == '__main__':
_local = False
if len(sys.argv) >= 2:
if sys.argv[1] == 'local':
_local = True
#ROOT_URL = 'http://localhost:8000/'
ROOT_URL = '' ## No more nested directory pages.
main()
## Write a batch file to start the server in windows.
if _local and os.name == 'nt':
with open(join(TARGET_DIR, 'server.bat'), 'w') as f:
f.write('python -m http.server 8000')
  print('Static pages generated' +\
      (' for localhost:8000.' if _local else '.'))
|
tests/test_losses.py | bendavidsteel/neuroptica | 161 | 12712922 | import unittest
from neuroptica.layers import Activation, ClementsLayer
from neuroptica.losses import CategoricalCrossEntropy, MeanSquaredError
from neuroptica.models import Sequential
from neuroptica.nonlinearities import *
from neuroptica.optimizers import Optimizer
from tests.base import NeuropticaTest
from tests.test_models import TestModels
class TestLosses(NeuropticaTest):
'''Tests for model losses'''
def test_loss_gradients(self):
N = 7
losses = [MeanSquaredError, CategoricalCrossEntropy]
for loss in losses:
print("Testing loss {}".format(loss))
batch_size = 6
n_samples = batch_size * 4
# Generate random points and label them (one-hot) according to index of max element
X_all = (2 * np.random.rand(N * n_samples) - 1).reshape((N, n_samples)) # random N-D points
X_max = np.argmax(X_all, axis=0)
Y_all = np.zeros((N, n_samples))
Y_all[X_max, np.arange(n_samples)] = 1.0
# Make a single-layer model
model = Sequential([
ClementsLayer(N),
Activation(AbsSquared(N))
])
for X, Y in Optimizer.make_batches(X_all, Y_all, batch_size):
# Propagate the data forward
Y_hat = model.forward_pass(X)
d_loss = loss.dL(Y_hat, Y)
# Compute the backpropagated signals for the model
gradients = model.backward_pass(d_loss)
TestModels.verify_model_gradients(model, X, Y, loss.L, gradients, epsilon=1e-6)
if __name__ == "__main__":
unittest.main()
|
tests/fixtures/builtin.py | WillDaSilva/mkdocstrings | 354 | 12712951 | def func(foo=print):
"""test"""
|
attendance/serializers.py | akshaya9/fosswebsite | 369 | 12712957 | <reponame>akshaya9/fosswebsite<filename>attendance/serializers.py
from rest_framework import serializers
from attendance.models import SSIDName
class SSIDNameSerializer(serializers.ModelSerializer):
class Meta:
model = SSIDName
fields = ['name']
read_only_fields = ['name']
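# Minimal usage sketch (not part of the original file; assumes at least one
# SSIDName row exists and "campus-wifi" is an invented value):
# serializer = SSIDNameSerializer(SSIDName.objects.first())
# serializer.data  # -> {'name': 'campus-wifi'}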
|
recipes/tests.py | TechNic11/Try-Django-3.2 | 136 | 12713004 | <gh_stars>100-1000
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.test import TestCase
from .models import RecipeIngredient, Recipe
User = get_user_model()
class UserTestCase(TestCase):
def setUp(self):
self.user_a = User.objects.create_user('cfe', password='<PASSWORD>')
def test_user_pw(self):
checked = self.user_a.check_password("<PASSWORD>")
self.assertTrue(checked)
class RecipeTestCase(TestCase):
def setUp(self):
self.user_a = User.objects.create_user('cfe', password='<PASSWORD>')
self.recipe_a = Recipe.objects.create(
name='Grilled Chicken',
user = self.user_a
)
self.recipe_b = Recipe.objects.create(
name='Grilled Chicken Tacos',
user = self.user_a
)
self.recipe_ingredient_a = RecipeIngredient.objects.create(
recipe=self.recipe_a,
name='Chicken',
quantity='1/2',
unit='pound'
)
self.recipe_ingredient_b = RecipeIngredient.objects.create(
recipe=self.recipe_a,
name='Chicken',
quantity='asdfasd',
unit='pound'
)
def test_user_count(self):
qs = User.objects.all()
self.assertEqual(qs.count(), 1)
def test_user_recipe_reverse_count(self):
user = self.user_a
qs = user.recipe_set.all()
self.assertEqual(qs.count(), 2)
def test_user_recipe_forward_count(self):
user = self.user_a
qs = Recipe.objects.filter(user=user)
self.assertEqual(qs.count(), 2)
def test_recipe_ingredient_reverse_count(self):
recipe = self.recipe_a
qs = recipe.recipeingredient_set.all()
self.assertEqual(qs.count(), 2)
def test_recipe_ingredientcount(self):
recipe = self.recipe_a
qs = RecipeIngredient.objects.filter(recipe=recipe)
self.assertEqual(qs.count(), 2)
def test_user_two_level_relation(self):
user = self.user_a
qs = RecipeIngredient.objects.filter(recipe__user=user)
self.assertEqual(qs.count(), 2)
def test_user_two_level_relation_reverse(self):
user = self.user_a
recipeingredient_ids = list(user.recipe_set.all().values_list('recipeingredient__id', flat=True))
qs = RecipeIngredient.objects.filter(id__in=recipeingredient_ids)
self.assertEqual(qs.count(), 2)
def test_user_two_level_relation_via_recipes(self):
user = self.user_a
ids = user.recipe_set.all().values_list("id", flat=True)
qs = RecipeIngredient.objects.filter(recipe__id__in=ids)
self.assertEqual(qs.count(), 2)
def test_unit_measure_validation(self):
invalid_unit = 'ounce'
ingredient = RecipeIngredient(
name='New',
quantity=10,
recipe=self.recipe_a,
unit=invalid_unit
)
ingredient.full_clean()
def test_unit_measure_validation_error(self):
invalid_units = ['nada', 'asdfadsf']
with self.assertRaises(ValidationError):
for unit in invalid_units:
ingredient = RecipeIngredient(
name='New',
quantity=10,
recipe=self.recipe_a,
unit=unit
)
ingredient.full_clean()
def test_quantity_as_float(self):
self.assertIsNotNone(self.recipe_ingredient_a.quantity_as_float)
self.assertIsNone(self.recipe_ingredient_b.quantity_as_float)
|
perf_dashboard/python_clientlibs_download.py | harshb36/python-runtime | 207 | 12713005 | <reponame>harshb36/python-runtime
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import sys
import time
import uuid
from google.cloud import bigquery
import bq_utils
GCLOUD_PROJECT_ENV = 'GCLOUD_PROJECT'
DATETIME_FORMAT = '%Y%m%d'
DATASET_NAME = 'python_clientlibs_download_by_week'
VENEER_TABLE_NAME = 'veneer_client_libs'
STACKDRIVER_TABLE_NAME = 'stackdriver_client_libs'
GRPC_TABLE_NAME = 'grpc_lib'
THIRD_PARTY_TABLE_NAME = 'third_party_client_libs'
TABLES = [
VENEER_TABLE_NAME,
GRPC_TABLE_NAME,
STACKDRIVER_TABLE_NAME,
THIRD_PARTY_TABLE_NAME,
]
CLIENTLIBS = {
VENEER_TABLE_NAME: [
'google-cloud-core',
'google-cloud-speech',
'google-cloud-language',
'google-cloud-pubsub',
'google-cloud-bigquery',
'google-cloud-bigtable',
'google-cloud-datastore',
'google-cloud-spanner',
'google-cloud-storage',
'google-cloud-vision',
'google-cloud-translate',
'google-cloud-dns',
'google-cloud-videointelligence',
],
STACKDRIVER_TABLE_NAME: [
'google-cloud-logging',
'google-cloud-monitoring',
'google-cloud-error_reporting',
'google-cloud-trace',
],
GRPC_TABLE_NAME: [
'grpcio',
],
THIRD_PARTY_TABLE_NAME: [
'pandas-gbq',
]
}
def get_weekly_clientlibs_downloads(clientlibs_table_name, date_str):
"""Use a SQL query to collect the weekly download data of the client
libraries.
Args:
clientlibs_table_name (str): Table name, which is the key in the
CLIENTLIBS dict.
date_str (str): A date string in "YYYYMMDD" format.
Returns:
list: rows of the query result.
"""
client_libs = CLIENTLIBS[clientlibs_table_name]
date_time = datetime.datetime.strptime(date_str, DATETIME_FORMAT)
week_dates = [(date_time + datetime.timedelta(days=-i))
.strftime(DATETIME_FORMAT)
for i in range(7)]
query = """
SELECT
file.project as client_library_name,
COUNT(*) as download_count
FROM
`the-psf.pypi.downloads*`
WHERE
file.project IN UNNEST(@client_libs)
AND
_TABLE_SUFFIX IN UNNEST(@week_dates)
GROUP BY client_library_name
"""
client = bigquery.Client()
query_parameters=[
bigquery.ArrayQueryParameter(
'client_libs', 'STRING', client_libs),
bigquery.ArrayQueryParameter(
'week_dates', 'STRING', week_dates)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_parameters
query_job = client.query(query, job_config=job_config)
# Wait for the job to complete and get the results
results = [row.values() for row in query_job.result()]
rows = [(date_time,) + row for row in results]
return rows
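# Illustrative shape of the rows returned above (library names and counts are
# invented; the first element is the parsed query date):
# [(datetime.datetime(2021, 1, 4, 0, 0), 'google-cloud-storage', 123456),
#  (datetime.datetime(2021, 1, 4, 0, 0), 'google-cloud-bigquery', 98765)]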
def main():
for table_name in CLIENTLIBS.keys():
rows = get_weekly_clientlibs_downloads(
clientlibs_table_name=table_name,
date_str=datetime.datetime.now().strftime("%Y%m%d"))
bq_utils.insert_rows(
project=os.environ.get(GCLOUD_PROJECT_ENV),
dataset_name=DATASET_NAME,
table_name=table_name,
rows=rows)
if __name__ == '__main__':
main()
|
python/KerasModelRestoration.py | GangababuManam/tensorflow-101 | 832 | 12713013 | <reponame>GangababuManam/tensorflow-101
import tensorflow as tf
import numpy as np
from keras.models import Sequential
from keras.models import load_model
from keras.models import model_from_json
from keras.layers.core import Dense, Activation
from keras.utils import np_utils
#----------------------------
train = False
load_all_model = True #if train is False
#----------------------------
#preparing data for Exclusive OR (XOR)
attributes = [
#x1, x2
[0 ,0]
, [0, 1]
, [1, 0]
, [1, 1]
]
labels = [
    #is_0, is_1 -> only one column can be 1 in the labels variable
[1, 0]
, [0, 1]
, [0, 1]
, [1, 0]
]
#transforming attributes and labels matrixes to numpy
data = np.array(attributes, 'int64')
target = np.array(labels, 'int64')
#----------------------------
#creating model
if train == True:
model = Sequential()
model.add(Dense(3 #num of hidden units
, input_shape=(len(attributes[0]),))) #num of features in input layer
model.add(Activation('sigmoid')) #activation function from input layer to 1st hidden layer
model.add(Dense(len(labels[0]))) #num of classes in output layer
model.add(Activation('softmax')) #activation function from 1st hidden layer to output layer
model_config = model.to_json()
open("model_structure.json", "w").write(model_config)
#compile
model.compile(loss='categorical_crossentropy', optimizer='adam')
#training
model.fit(data, target, epochs=2000, verbose=0)
model.save("model.hdf5")
model.save_weights('model_weights.h5')
else:
if load_all_model == True:
model = load_model("model.hdf5") #model structure, weights
print("network structure and weights loaded")
else:
model = model_from_json(open("model_structure.json", "r").read()) #load structure
print("network structure loaded")
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.load_weights('model_weights.h5') #load weights
print("weights loaded")
score = model.evaluate(data, target)
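# Optional sanity check one could add here (illustrative, not in the original):
# predictions = model.predict(data)        # softmax scores per class
# print(np.argmax(predictions, axis=1))    # a well-trained net prints [0 1 1 0]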
print(score) |
src/utils/lastfm_etl/lastfm.py | LaudateCorpus1/hermes-5 | 135 | 12713050 | #!/usr/bin/env python
"""Translate the Last.fm data files to JSON.
This script takes the various Last.fm data files and write them out as
JSON. It removes the Last.fm artist URLs.
Attributes:
ARTISTS (dict): A dictionary that stores information about the artists. The
variables are as follows:
- artist_id (int): A unique identifier for each artist.
- name (str): The name of the artist.
FRIENDS (dict): A dictionary that stores information about the friends
graph. The variables are as follows:
- user_id (int): A unique identifier for each user.
- friend_user_id (int): A unique identifier of a user on the
friends list.
TAGS (dict): A dictionary that stores information about the tags. The
variables are as follows:
- tag_id (int): A unique identifier for each tag.
        - name (str): The name of the tag.
PLAYS (dict): A dictionary that stores information about the number of
plays by each user. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- plays (int): The number of plays by the user of the artist.
APPLIED_TAGS (dict): A dictionary that stores information about the tags
various users applied to various artists. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- tag_id (int): A unique identifier for each tag.
- day (int): The day the tag was added.
- month (int): The month the tag was added.
- year (int): The year the tag was added.
"""
from copy import deepcopy
import json
import csv
# JSON objects
ARTISTS = {
"artist_id": None,
"name": None,
}
FRIENDS = {
"user_id": None,
"friend_user_id": None,
}
TAGS = {
"tag_id": None,
"name": None,
}
PLAYS = {
"user_id": None,
"artist_id": None,
"plays": None,
}
APPLIED_TAGS = {
"user_id": None,
"artist_id": None,
"tag_id": None,
"day": None,
"month": None,
"year": None,
}
def convert_str(string):
"""Convert a string from 'iso-8859-1' to 'utf8'."""
return string.decode('iso-8859-1').encode('utf8')
def iter_lines(open_file):
"""Open the Last.fm CSVs and return an iterator over the lines.
Args:
open_file: A file handle object from open().
    Returns:
iterator: An iterator over each line in the file. Each line is a list,
with string elements for each column value.
"""
reader = csv.reader(
open_file,
delimiter='\t',
)
next(reader) # Skip the header
return reader
def parse_artist_line(line):
"""Parse a line from the Artist CSV file.
A line is a list of strings as follows:
line = [
artist_id,
name,
band_url,
band_photo_url,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "artist_id" and
"name".
"""
(artist_id, name, _, _) = line
current_artist = deepcopy(ARTISTS)
current_artist["artist_id"] = int(artist_id)
current_artist["name"] = name
return current_artist
def parse_friends_line(line):
"""Parse a line from the Friends CSV file.
A line is a list of strings as follows:
line = [
user_id,
user_id_of_friend,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id" and "friend_user_id".
"""
(user_id, friend_id) = line
current_friend = deepcopy(FRIENDS)
current_friend["user_id"] = int(user_id)
current_friend["friend_user_id"] = int(friend_id)
return current_friend
def parse_tag_line(line):
"""Parse a line from the Tag CSV file.
A line is a list of strings as follows:
line = [
tag_id,
tag,
]
Args:
lines (list): A list of strings as described above.
Returns:
        dict: A dictionary containing the keys "tag_id" and "name".
"""
(tag_id, tag) = line
current_tag = deepcopy(TAGS)
current_tag["tag_id"] = int(tag_id)
current_tag["name"] = convert_str(tag)
return current_tag
def parse_applied_tag_line(line):
"""Parse a line from the Applied Tags CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
tag_id,
day,
month,
year,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id",
"tag_id", "day", "month", and "year".
"""
(user_id, artist_id, tag_id, day, month, year) = line
current_tag = deepcopy(APPLIED_TAGS)
current_tag["user_id"] = int(user_id)
current_tag["artist_id"] = int(artist_id)
current_tag["tag_id"] = int(tag_id)
current_tag["day"] = int(day)
current_tag["month"] = int(month)
current_tag["year"] = int(year)
return current_tag
def parse_plays_line(line):
"""Parse a line from the Played Artists CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
play_count,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id", and
"plays".
"""
(user_id, artist_id, plays) = line
current_plays = deepcopy(PLAYS)
current_plays["user_id"] = int(user_id)
current_plays["artist_id"] = int(artist_id)
current_plays["plays"] = int(plays)
return current_plays
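# Illustrative call (values invented; real lines come from iter_lines()):
# parse_plays_line(['2', '51', '13883'])
# # -> {'user_id': 2, 'artist_id': 51, 'plays': 13883}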
if __name__ == "__main__":
import argparse
# Set up command line flag handling
parser = argparse.ArgumentParser(
description="Transform the Last.FM datasets to JSON",
)
parser.add_argument(
'artists',
type=str,
help="the file containing the artists, normally 'artists.dat'",
)
parser.add_argument(
'tags',
type=str,
help="the file containing the tags, normally 'tags.dat'",
)
parser.add_argument(
'friends',
type=str,
help="the file containing the friends graph, normally 'user_friends.dat'",
)
parser.add_argument(
'applied_tags',
type=str,
help="the file containing the applied tags, normally 'user_taggedartists.dat'",
)
parser.add_argument(
'plays',
type=str,
help="the file containing the play counts, normally 'user_artists.dat'",
)
parser.add_argument(
'-o',
'--output-directory',
type=str,
action="store",
help="the directory to save the output JSON files, by default the current directory",
default="./",
)
args = parser.parse_args()
# Parse the files
processing_queue = (
(args.artists, args.output_directory + "/lastfm_artists.json", parse_artist_line),
(args.tags, args.output_directory + "/lastfm_tags.json", parse_tag_line),
(args.friends, args.output_directory + "/lastfm_friends.json", parse_friends_line),
(args.applied_tags, args.output_directory + "/lastfm_applied_tags.json", parse_applied_tag_line),
(args.plays, args.output_directory + "/lastfm_plays.json", parse_plays_line),
)
for input_file, output_file, function in processing_queue:
with open(input_file, 'rb') as csv_file, open(output_file, 'w') as json_file:
for row in iter_lines(csv_file):
json_file.write(json.dumps(function(row)) + '\n')
|
kmip/core/secrets.py | ondrap/PyKMIP | 179 | 12713051 | <reponame>ondrap/PyKMIP
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from kmip.core.attributes import CertificateType
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core import exceptions
from kmip.core.misc import CertificateValue
from kmip.core import objects
from kmip.core.objects import Attribute
from kmip.core.objects import KeyBlock
from kmip.core import primitives
from kmip.core.primitives import Struct
from kmip.core.primitives import Enumeration
from kmip.core.primitives import ByteString
from kmip.core import utils
from kmip.core.utils import BytearrayStream
# 2.2
# 2.2.1
class Certificate(Struct):
"""
A structure representing a DER-encoded X.509 public key certificate.
See Section 2.2.1 of the KMIP 1.1 specification for more information.
Attributes:
certificate_type: The type of the certificate.
certificate_value: The bytes of the certificate.
"""
def __init__(self,
certificate_type=None,
certificate_value=None):
"""
Construct a Certificate object.
Args:
certificate_type (CertificateType): The type of the
certificate. Optional, defaults to None.
certificate_value (bytes): The bytes of the certificate. Optional,
defaults to None.
"""
super(Certificate, self).__init__(Tags.CERTIFICATE)
if certificate_type is None:
self.certificate_type = CertificateType()
else:
self.certificate_type = CertificateType(certificate_type)
if certificate_value is None:
self.certificate_value = CertificateValue()
else:
self.certificate_value = CertificateValue(certificate_value)
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the Certificate object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(Certificate, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.certificate_type = CertificateType()
self.certificate_value = CertificateValue()
self.certificate_type.read(tstream, kmip_version=kmip_version)
self.certificate_value.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
tstream = BytearrayStream()
self.certificate_type.write(tstream, kmip_version=kmip_version)
self.certificate_value.write(tstream, kmip_version=kmip_version)
self.length = tstream.length()
super(Certificate, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def __eq__(self, other):
if isinstance(other, Certificate):
if self.certificate_type != other.certificate_type:
return False
elif self.certificate_value != other.certificate_value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Certificate):
return not (self == other)
else:
return NotImplemented
def __repr__(self):
return "{0}(certificate_type={1}, certificate_value=b'{2}')".format(
type(self).__name__,
str(self.certificate_type),
str(self.certificate_value))
def __str__(self):
return "{0}".format(str(self.certificate_value))
# 2.2.2
class KeyBlockKey(Struct):
def __init__(self, key_block=None, tag=Tags.DEFAULT):
super(KeyBlockKey, self).__init__(tag)
self.key_block = key_block
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(KeyBlockKey, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.key_block = KeyBlock()
self.key_block.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.key_block.write(tstream, kmip_version=kmip_version)
# Write the length and value of the template attribute
self.length = tstream.length()
super(KeyBlockKey, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class SymmetricKey(KeyBlockKey):
def __init__(self, key_block=None):
super(SymmetricKey, self).__init__(key_block, Tags.SYMMETRIC_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.3
class PublicKey(KeyBlockKey):
def __init__(self, key_block=None):
super(PublicKey, self).__init__(key_block, Tags.PUBLIC_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.4
class PrivateKey(KeyBlockKey):
def __init__(self, key_block=None):
super(PrivateKey, self).__init__(key_block, Tags.PRIVATE_KEY)
self.validate()
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class SplitKey(primitives.Struct):
"""
A split key cryptographic object.
This object represents a symmetric or private key that has been split into
multiple parts. The fields of this object specify how the key was split
and how it can be reassembled.
Attributes:
split_key_parts: The total number of parts of the split key.
key_part_identifier: The ID specifying the part of the key in the key
block.
split_key_threshold: The minimum number of parts needed to reconstruct
the key.
split_key_method: The method by which the key was split.
prime_field_size: The prime field size used for the Polynomial Sharing
Prime Field split key method.
key_block: The split key part held by this object.
"""
def __init__(self,
split_key_parts=None,
key_part_identifier=None,
split_key_threshold=None,
split_key_method=None,
prime_field_size=None,
key_block=None):
"""
Construct a SplitKey object.
Args:
split_key_parts (int): An integer specifying the total number of
parts of the split key. Optional, defaults to None. Required
for read/write.
key_part_identifier (int): An integer specifying which key part is
contained in the key block. Optional, defaults to None.
Required for read/write.
split_key_threshold (int): An integer specifying the minimum number
of key parts required to reconstruct the split key. Optional,
defaults to None. Required for read/write.
split_key_method (enum): A SplitKeyMethod enumeration specifying
the method by which the key was split. Optional, defaults to
None. Required for read/write.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None. Required for read/write
only if the split key method is Polynomial Sharing Prime Field.
key_block (struct): A KeyBlock structure containing the split key
part identified by the key part identifier. Optional, defaults
to None. Required for read/write.
"""
super(SplitKey, self).__init__(enums.Tags.SPLIT_KEY)
self._split_key_parts = None
self._key_part_identifier = None
self._split_key_threshold = None
self._split_key_method = None
self._prime_field_size = None
self._key_block = None
self.split_key_parts = split_key_parts
self.key_part_identifier = key_part_identifier
self.split_key_threshold = split_key_threshold
self.split_key_method = split_key_method
self.prime_field_size = prime_field_size
self.key_block = key_block
@property
def split_key_parts(self):
if self._split_key_parts is not None:
return self._split_key_parts.value
return None
@split_key_parts.setter
def split_key_parts(self, value):
if value is None:
self._split_key_parts = None
elif isinstance(value, six.integer_types):
self._split_key_parts = primitives.Integer(
value=value,
tag=enums.Tags.SPLIT_KEY_PARTS
)
else:
raise TypeError("The split key parts must be an integer.")
@property
def key_part_identifier(self):
if self._key_part_identifier is not None:
return self._key_part_identifier.value
return None
@key_part_identifier.setter
def key_part_identifier(self, value):
if value is None:
self._key_part_identifier = None
elif isinstance(value, six.integer_types):
self._key_part_identifier = primitives.Integer(
value=value,
tag=enums.Tags.KEY_PART_IDENTIFIER
)
else:
raise TypeError("The key part identifier must be an integer.")
@property
def split_key_threshold(self):
if self._split_key_threshold is not None:
return self._split_key_threshold.value
return None
@split_key_threshold.setter
def split_key_threshold(self, value):
if value is None:
self._split_key_threshold = None
elif isinstance(value, six.integer_types):
self._split_key_threshold = primitives.Integer(
value=value,
tag=enums.Tags.SPLIT_KEY_THRESHOLD
)
else:
raise TypeError("The split key threshold must be an integer.")
@property
def split_key_method(self):
if self._split_key_method is not None:
return self._split_key_method.value
return None
@split_key_method.setter
def split_key_method(self, value):
if value is None:
self._split_key_method = None
elif isinstance(value, enums.SplitKeyMethod):
self._split_key_method = primitives.Enumeration(
enums.SplitKeyMethod,
value=value,
tag=enums.Tags.SPLIT_KEY_METHOD
)
else:
raise TypeError(
"The split key method must be a SplitKeyMethod enumeration."
)
@property
def prime_field_size(self):
if self._prime_field_size is not None:
return self._prime_field_size.value
return None
@prime_field_size.setter
def prime_field_size(self, value):
if value is None:
self._prime_field_size = None
elif isinstance(value, six.integer_types):
self._prime_field_size = primitives.BigInteger(
value=value,
tag=enums.Tags.PRIME_FIELD_SIZE
)
else:
raise TypeError("The prime field size must be an integer.")
@property
def key_block(self):
if self._key_block is not None:
return self._key_block
return None
@key_block.setter
def key_block(self, value):
if value is None:
self._key_block = None
elif isinstance(value, objects.KeyBlock):
self._key_block = value
else:
raise TypeError("The key block must be a KeyBlock structure.")
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the SplitKey object and decode it.
Args:
input_buffer (stream): A data stream containing the encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(SplitKey, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.SPLIT_KEY_PARTS, local_buffer):
self._split_key_parts = primitives.Integer(
tag=enums.Tags.SPLIT_KEY_PARTS
)
self._split_key_parts.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyParts field."
)
if self.is_tag_next(enums.Tags.KEY_PART_IDENTIFIER, local_buffer):
self._key_part_identifier = primitives.Integer(
tag=enums.Tags.KEY_PART_IDENTIFIER
)
self._key_part_identifier.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the KeyPartIdentifier field."
)
if self.is_tag_next(enums.Tags.SPLIT_KEY_THRESHOLD, local_buffer):
self._split_key_threshold = primitives.Integer(
tag=enums.Tags.SPLIT_KEY_THRESHOLD
)
self._split_key_threshold.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyThreshold field."
)
if self.is_tag_next(enums.Tags.SPLIT_KEY_METHOD, local_buffer):
self._split_key_method = primitives.Enumeration(
enums.SplitKeyMethod,
tag=enums.Tags.SPLIT_KEY_METHOD
)
self._split_key_method.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the SplitKeyMethod field."
)
if self.is_tag_next(enums.Tags.PRIME_FIELD_SIZE, local_buffer):
self._prime_field_size = primitives.BigInteger(
tag=enums.Tags.PRIME_FIELD_SIZE
)
self._prime_field_size.read(
local_buffer,
kmip_version=kmip_version
)
else:
corner_case = enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD
if self.split_key_method == corner_case:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the PrimeFieldSize "
"field. This field is required when the SplitKeyMethod is "
"PolynomialSharingPrimeField."
)
if self.is_tag_next(enums.Tags.KEY_BLOCK, local_buffer):
self._key_block = objects.KeyBlock()
self._key_block.read(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidKmipEncoding(
"The SplitKey encoding is missing the KeyBlock field."
)
self.is_oversized(local_buffer)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the SplitKey object to a buffer.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
local_buffer = utils.BytearrayStream()
if self._split_key_parts:
self._split_key_parts.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyParts field."
)
if self._key_part_identifier:
self._key_part_identifier.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the KeyPartIdentifier field."
)
if self._split_key_threshold:
self._split_key_threshold.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyThreshold field."
)
if self._split_key_method:
self._split_key_method.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the SplitKeyMethod field."
)
if self._prime_field_size:
self._prime_field_size.write(
local_buffer,
kmip_version=kmip_version
)
else:
corner_case = enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD
if self.split_key_method == corner_case:
raise exceptions.InvalidField(
"The SplitKey object is missing the PrimeFieldSize field. "
"This field is required when the SplitKeyMethod is "
"PolynomialSharingPrimeField."
)
if self._key_block:
self._key_block.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The SplitKey object is missing the KeyBlock field."
)
self.length = local_buffer.length()
super(SplitKey, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
def __repr__(self):
args = [
"split_key_parts={}".format(repr(self.split_key_parts)),
"key_part_identifier={}".format(repr(self.key_part_identifier)),
"split_key_threshold={}".format(repr(self.split_key_threshold)),
"split_key_method={}".format(self.split_key_method),
"prime_field_size={}".format(repr(self.prime_field_size)),
"key_block={}".format(repr(self.key_block))
]
return "SplitKey({})".format(", ".join(args))
def __str__(self):
# TODO (peter-hamilton) Replace str() call below with a dict() call.
value = ", ".join(
[
'"split_key_parts": {}'.format(self.split_key_parts),
'"key_part_identifier": {}'.format(self.key_part_identifier),
'"split_key_threshold": {}'.format(self.split_key_threshold),
'"split_key_method": {}'.format(self.split_key_method),
'"prime_field_size": {}'.format(self.prime_field_size),
'"key_block": {}'.format(str(self.key_block))
]
)
return "{" + value + "}"
def __eq__(self, other):
if isinstance(other, SplitKey):
if self.split_key_parts != other.split_key_parts:
return False
elif self.key_part_identifier != other.key_part_identifier:
return False
elif self.split_key_threshold != other.split_key_threshold:
return False
elif self.split_key_method != other.split_key_method:
return False
elif self.prime_field_size != other.prime_field_size:
return False
# elif self.key_block != other.key_block:
# return False
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SplitKey):
return not self.__eq__(other)
else:
return NotImplemented
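# Illustrative construction of a SplitKey (a sketch, not from the library docs;
# the numbers are invented and a real objects.KeyBlock would be needed before
# calling write()):
#
# split_key = SplitKey(
#     split_key_parts=3,
#     key_part_identifier=1,
#     split_key_threshold=2,
#     split_key_method=enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD,
#     prime_field_size=104729,
#     key_block=None,  # replace with an objects.KeyBlock(...) before encoding
# )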
# 2.2.6
class Template(Struct):
def __init__(self, attributes=None):
super(Template, self).__init__(Tags.TEMPLATE)
self.attributes = attributes
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(Template, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.attributes = list()
attribute = Attribute()
attribute.read(tstream, kmip_version=kmip_version)
self.attributes.append(attribute)
while self.is_tag_next(Tags.ATTRIBUTE, tstream):
attribute = Attribute()
attribute.read(tstream, kmip_version=kmip_version)
self.attributes.append(attribute)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
for attribute in self.attributes:
attribute.write(tstream, kmip_version=kmip_version)
# Write the length and value of the template attribute
self.length = tstream.length()
super(Template, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.7
class SecretData(Struct):
class SecretDataType(Enumeration):
def __init__(self, value=None):
super(SecretData.SecretDataType, self).__init__(
enums.SecretDataType, value, Tags.SECRET_DATA_TYPE)
def __init__(self,
secret_data_type=None,
key_block=None):
super(SecretData, self).__init__(Tags.SECRET_DATA)
self.secret_data_type = secret_data_type
self.key_block = key_block
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(SecretData, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.secret_data_type = SecretData.SecretDataType()
self.key_block = KeyBlock()
self.secret_data_type.read(tstream, kmip_version=kmip_version)
self.key_block.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.secret_data_type.write(tstream, kmip_version=kmip_version)
self.key_block.write(tstream, kmip_version=kmip_version)
# Write the length and value of the template attribute
self.length = tstream.length()
super(SecretData, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.2.8
class OpaqueObject(Struct):
class OpaqueDataType(Enumeration):
def __init__(self, value=None):
super(OpaqueObject.OpaqueDataType, self).__init__(
enums.OpaqueDataType, value, Tags.OPAQUE_DATA_TYPE)
class OpaqueDataValue(ByteString):
def __init__(self, value=None):
super(OpaqueObject.OpaqueDataValue, self).__init__(
value, Tags.OPAQUE_DATA_VALUE)
def __init__(self,
opaque_data_type=None,
opaque_data_value=None):
super(OpaqueObject, self).__init__(Tags.OPAQUE_OBJECT)
self.opaque_data_type = opaque_data_type
self.opaque_data_value = opaque_data_value
self.validate()
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(OpaqueObject, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.opaque_data_type = OpaqueObject.OpaqueDataType()
self.opaque_data_value = OpaqueObject.OpaqueDataValue()
self.opaque_data_type.read(tstream, kmip_version=kmip_version)
self.opaque_data_value.read(tstream, kmip_version=kmip_version)
self.is_oversized(tstream)
self.validate()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
tstream = BytearrayStream()
self.opaque_data_type.write(tstream, kmip_version=kmip_version)
self.opaque_data_value.write(tstream, kmip_version=kmip_version)
# Write the length and value of the template attribute
self.length = tstream.length()
super(OpaqueObject, self).write(ostream, kmip_version=kmip_version)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
|
946 Validate Stack Sequences.py | krishna13052001/LeetCode | 872 | 12713061 | #!/usr/bin/python3
"""
Given two sequences pushed and popped with distinct values, return true if and
only if this could have been the result of a sequence of push and pop operations
on an initially empty stack.
Example 1:
Input: pushed = [1,2,3,4,5], popped = [4,5,3,2,1]
Output: true
Explanation: We might do the following sequence:
push(1), push(2), push(3), push(4), pop() -> 4,
push(5), pop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1
Example 2:
Input: pushed = [1,2,3,4,5], popped = [4,3,5,1,2]
Output: false
Explanation: 1 cannot be popped before 2.
Note:
0 <= pushed.length == popped.length <= 1000
0 <= pushed[i], popped[i] < 1000
pushed is a permutation of popped.
pushed and popped have distinct values.
"""
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
"""
maintain a stack and iterate through pushed
"""
j = 0
n = len(pushed)
stk = []
for i in range(n):
stk.append(pushed[i])
while j < n and stk and stk[-1] == popped[j]:
stk.pop()
j += 1
return j == n
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
"""
maintain a stack
"""
i = 0
j = 0
stk = []
n = len(pushed)
while i < n and j < n:
while i < n and (not stk or stk[-1] != popped[j]):
stk.append(pushed[i])
i += 1
stk.pop()
j += 1
while j < n and stk and stk[-1] == popped[j]:
stk.pop()
j += 1
return not stk
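# Quick check mirroring the examples in the module docstring (illustrative,
# not part of the original LeetCode file):
# s = Solution()
# assert s.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
# assert not s.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])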
|
dizoo/gfootball/model/conv1d/conv1d_default_config.py | LuciusMos/DI-engine | 464 | 12713077 | from easydict import EasyDict
conv1d_config = dict(
feature_embedding=dict(
player=dict(
input_dim=36,
output_dim=64,
),
ball=dict(
input_dim=18,
output_dim=64,
),
left_team=dict(
input_dim=7,
output_dim=48,
conv1d_output_channel=36,
fc_output_dim=96,
),
right_team=dict(
input_dim=7,
output_dim=48,
conv1d_output_channel=36,
fc_output_dim=96,
),
left_closest=dict(
input_dim=7,
output_dim=48,
),
right_closest=dict(
input_dim=7,
output_dim=48,
)
),
fc_cat=dict(input_dim=416, ),
lstm_size=256,
policy_head=dict(
input_dim=256,
hidden_dim=164,
act_shape=19,
),
value_head=dict(input_dim=256, hidden_dim=164, output_dim=1),
)
conv1d_default_config = EasyDict(conv1d_config)
|
examples/arkane/species/CH2CHOOH/input.py | tza0035/RMG-Py | 250 | 12713084 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
modelChemistry = "CBS-QB3"
useHinderedRotors = True
useBondCorrections = False
species('CH2CHOOH', 'CH2CHOOH.py')
statmech('CH2CHOOH')
thermo('CH2CHOOH', 'Wilhoit')
|
tests/version_consistency/dummy_test.py | ldelebec/asteroid | 722 | 12713116 | <reponame>ldelebec/asteroid<filename>tests/version_consistency/dummy_test.py
def dummy_test():
pass
|
census_extractomatic/user_geo.py | censusreporter/census-api | 135 | 12713117 | <reponame>censusreporter/census-api
"""Centralize non-Flask code for 2020 User Geography data aggregation here.
This file serves both as a library for the Flask app as well as
a bootstrap for Celery tasks, which could be run with something like
celery -A census_extractomatic.user_geo:celery_app worker
"""
from datetime import timedelta
from sqlalchemy.sql import text
import json
from collections import OrderedDict
from copy import deepcopy
from tempfile import NamedTemporaryFile
import zipfile
import pandas as pd
import numpy as np
import ogr
from celery import Celery
import os
from sqlalchemy import create_engine
import boto3
from botocore.exceptions import ClientError
import logging
logger = logging.getLogger('gunicorn.error')
from timeit import default_timer as timer
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
CELERY_BROKER = os.environ['REDIS_URL']
celery_app = Celery(__name__, broker=CELERY_BROKER)
celery_db = create_engine(SQLALCHEMY_DATABASE_URI)
@celery_app.task
def join_user_geo_to_blocks_task(user_geodata_id):
join_user_to_census(celery_db, user_geodata_id)
COMPARISON_RELEASE_CODE = 'dec_pl94_compare_2020_2010'
USER_GEODATA_INSERT_SQL = text("""
INSERT INTO aggregation.user_geodata (name, hash_digest, source_url, public, fields, bbox)
VALUES (:name, :hash_digest, :source_url, :public, :fields, ST_MakeEnvelope(:xmin, :ymin, :xmax, :ymax, 4326))
RETURNING *
""")
USER_GEODATA_GEOMETRY_INSERT_SQL = text("""
INSERT INTO aggregation.user_geodata_geometry (user_geodata_id, geom, name, original_id, properties)
VALUES (:user_geodata_id,
ST_Transform(
ST_GeomFromText(:geom_wkt,:epsg),
4326),
:name,
:original_id,
:properties
)
""")
USER_GEODATA_SELECT_BY_HASH_DIGEST = text('''
SELECT user_geodata_id,
EXTRACT(EPOCH from created_at) unix_timestamp,
name,
bbox,
fields,
source_url,
status,
notes_html,
public
FROM aggregation.user_geodata
WHERE hash_digest=:hash_digest
''')
AGGREGATE_BLOCKS_2010_SQL = text("""
INSERT INTO aggregation.user_geodata_blocks_2010 (user_geodata_geometry_id, geoid)
SELECT ugg.user_geodata_geometry_id, b.geoid10
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
blocks.tabblock10 b
WHERE ug.user_geodata_id = :geodata_id
AND ug.user_geodata_id = ugg.user_geodata_id
AND ST_Intersects(ug.bbox, b.geom)
AND ST_Contains(ugg.geom,
ST_SetSRID(ST_MakePoint(b.intptlon10::double precision,
b.intptlat10::double precision),
4326))
""")
AGGREGATE_BLOCKS_2020_SQL = text("""
INSERT INTO aggregation.user_geodata_blocks_2020 (user_geodata_geometry_id, geoid)
SELECT ugg.user_geodata_geometry_id, b.geoid20
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
blocks.tabblock20 b
WHERE ug.user_geodata_id = :geodata_id
AND ug.user_geodata_id = ugg.user_geodata_id
AND ST_Intersects(ug.bbox, b.geom)
AND ST_Contains(ugg.geom,
ST_SetSRID(ST_MakePoint(b.intptlon20::double precision,
b.intptlat20::double precision),
4326))
""")
USER_GEOMETRY_SELECT_WITH_GEOM_BY_HASH_DIGEST = text('''
SELECT ugg.user_geodata_geometry_id, ugg.name, ugg.original_id, ST_asGeoJSON(ST_ForcePolygonCCW(ugg.geom))
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
''')
USER_GEOMETRY_SELECT_2020_BLOCKS_WITH_GEOM_BY_HASH_DIGEST = text('''
SELECT ug.name upload_name,
ugb.geoid,
ugg.user_geodata_geometry_id cr_geoid,
ugg.name,
ugg.original_id,
g.pop100,
g.hu100,
g.state || g.place as state_place_fips,
ST_asGeoJSON(ST_ForcePolygonCCW(b.geom)) geom
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.user_geodata_blocks_2020 ugb,
dec2020_pl94.geoheader g,
blocks.tabblock20 b
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = b.geoid20
AND b.geoid20 = g.geoid
''')
USER_GEOMETRY_SELECT_2010_BLOCKS_WITH_GEOM_BY_HASH_DIGEST = text('''
SELECT ug.name upload_name,
ugb.geoid,
ugg.user_geodata_geometry_id cr_geoid,
ugg.name,
ugg.original_id,
g.pop100,
g.hu100,
g.state || g.place as state_place_fips,
ST_asGeoJSON(ST_ForcePolygonCCW(b.geom)) geom
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.user_geodata_blocks_2010 ugb,
dec2010_pl94.geoheader g,
blocks.tabblock10 b
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = b.geoid10
AND b.geoid10 = g.geoid
''')
BLOCK_VINTAGE_TABLES = {
'dec2010_pl94': 'user_geodata_blocks_2010',
'dec2020_pl94': 'user_geodata_blocks_2020'
}
SELECT_BY_USER_GEOGRAPHY_SQL_TEMPLATE = """
SELECT ugg.user_geodata_geometry_id,
ugg.name,
ugg.original_id,
ST_asGeoJSON(ST_ForcePolygonCCW(ugg.geom)) geom,
d.*
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.{blocks_vintage_table} ugb,
{schema}.{table_code} d
WHERE ug.hash_digest = :hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = d.geoid
"""
def fetch_user_geodata(db, hash_digest):
with db.engine.begin() as con:
cur = con.execute(USER_GEODATA_SELECT_BY_HASH_DIGEST,hash_digest=hash_digest)
keys = list(cur._metadata.keys)
row = cur.first()
if row:
return dict(zip(keys,row))
return None
def _fieldsFromOGRLayer(layer):
fields = []
ldefn = layer.GetLayerDefn()
for n in range(ldefn.GetFieldCount()):
fdefn = ldefn.GetFieldDefn(n)
fields.append(fdefn.name)
return fields
def save_user_geojson(db,
geojson_str,
hash_digest,
dataset_name,
name_field,
id_field,
source_url,
share_checked):
tmp = NamedTemporaryFile('w',suffix='.json',delete=False)
tmp.write(geojson_str)
tmp.close()
ogr_file = ogr.Open(tmp.name)
if ogr_file is None:
raise ValueError(f"ogr.Open failed for {tmp.name}")
# assume geojson always has one layer, right?
l = ogr_file.GetLayer(0)
epsg = l.GetSpatialRef().GetAuthorityCode(None)
(xmin, xmax, ymin, ymax) = l.GetExtent()
dataset_id = None
fields = _fieldsFromOGRLayer(l)
with db.engine.begin() as con:
cur = con.execute(USER_GEODATA_INSERT_SQL,
name=dataset_name,
hash_digest=hash_digest,
source_url=source_url,
public=share_checked,
fields=json.dumps(fields),
xmin=xmin,
ymin=ymin,
xmax=xmax,
ymax=ymax)
dataset_id = cur.fetchall()[0][0]
for i in range(0,l.GetFeatureCount()):
f = l.GetFeature(i)
mp = ogr.ForceToMultiPolygon(f.GetGeometryRef())
properties = dict((fld, f.GetField(i)) for i,fld in enumerate(fields))
con.execute(USER_GEODATA_GEOMETRY_INSERT_SQL,
user_geodata_id=dataset_id,
geom_wkt=mp.ExportToWkt(),
epsg=epsg,
name=properties.get(name_field),
original_id=properties.get(id_field),
properties=json.dumps(properties))
if dataset_id is not None:
join_user_geo_to_blocks_task.delay(dataset_id)
return dataset_id
def list_user_geographies(db):
cur = db.engine.execute('select *, st_asGeoJSON(bbox) bbox_json from aggregation.user_geodata where public = true order by name')
results = []
for row in cur:
d = dict(row)
bbox_json = d.pop('bbox_json')
# parse JSON string and get rid of binary bbox
if bbox_json:
d['bbox'] = json.loads(bbox_json)
else:
del d['bbox']
results.append(d)
return results
def join_user_to_census(db, user_geodata_id):
"""Waffling a little on structure but this provides a single transaction-protected function which computes block joins
for all user geographies associated with a specified user geo dataset, including clearing out anything which
might have been there (shouldn't really be) and managing the status.
"""
# first set the status in its own transaction so that it serves as a sign that the work is happening.
# we may want to check the status to make sure it isn't already processing to avoid overlapping jobs
# although the delete statements should mean that isn't a terrible problem, just a longer CPU load
db.engine.execute(text("UPDATE aggregation.user_geodata SET status = 'PROCESSING' where user_geodata_id = :geodata_id"),geodata_id=user_geodata_id)
with db.engine.begin() as con:
con.execute(text("""
DELETE FROM aggregation.user_geodata_blocks_2010
WHERE user_geodata_geometry_id in
(SELECT user_geodata_geometry_id FROM aggregation.user_geodata_geometry
WHERE user_geodata_id=:geodata_id)"""),geodata_id=user_geodata_id)
con.execute(text("""
DELETE FROM aggregation.user_geodata_blocks_2020
WHERE user_geodata_geometry_id in
(SELECT user_geodata_geometry_id FROM aggregation.user_geodata_geometry
WHERE user_geodata_id=:geodata_id)"""),geodata_id=user_geodata_id)
con.execute(AGGREGATE_BLOCKS_2010_SQL,geodata_id=user_geodata_id)
con.execute(AGGREGATE_BLOCKS_2020_SQL,geodata_id=user_geodata_id)
db.engine.execute(text("UPDATE aggregation.user_geodata SET status = 'READY' where user_geodata_id = :geodata_id"),geodata_id=user_geodata_id)
def _blankFeatureCollection():
return {
"type": "FeatureCollection",
"features": []
}
def fetch_user_geog_as_geojson(db, hash_digest):
geojson = _blankFeatureCollection()
cur = db.engine.execute(USER_GEOMETRY_SELECT_WITH_GEOM_BY_HASH_DIGEST,hash_digest=hash_digest)
if cur.rowcount == 0:
raise ValueError(f"Invalid geography ID {hash_digest}")
for cr_geoid, name, original_id, geojson_str in cur:
base = {
'type': 'Feature'
}
base['geometry'] = json.loads(geojson_str)
base['properties'] = {
'cr_geoid': cr_geoid
}
if name is not None: base['properties']['name'] = name
if original_id is not None:
base['properties']['original_id'] = original_id
base['id'] = original_id
geojson['features'].append(base)
return geojson
USER_BLOCKS_BY_HASH_DIGEST_SQL = {
'2020': USER_GEOMETRY_SELECT_2020_BLOCKS_WITH_GEOM_BY_HASH_DIGEST,
'2010': USER_GEOMETRY_SELECT_2010_BLOCKS_WITH_GEOM_BY_HASH_DIGEST
}
def fetch_metadata(release=None, table_code=None):
# for now we'll just do it from literal objects here but deepcopy them so we don't get messed up
# maybe later we'll make a metadata schema in the database
if table_code is None:
raise Exception('Table code must be specified for metadata fetch')
md = METADATA.get(table_code.lower())
if md:
if release is None or release in md['releases']:
return deepcopy(md)
if release == COMPARISON_RELEASE_CODE:
c_10 = []
c_20 = []
c_change = []
base = deepcopy(md)
for col,label in md['columns'].items():
c_10.append((f"{col}_2010", f"{label} (2010)"))
c_20.append((f"{col}_2020", f"{label} (2020)"))
c_change.append((f"{col}_pct_chg", f"{label} (% change)"))
base['columns'] = OrderedDict(c_20 + c_10 + c_change)
return base
return None
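# Illustrative expansion for the comparison release (the column code and label
# are hypothetical, not taken from the real METADATA dict): a base column
# ('P001001', 'Total population') would yield
#   P001001_2020     "Total population (2020)"
#   P001001_2010     "Total population (2010)"
#   P001001_pct_chg  "Total population (% change)"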
def evaluateUserGeographySQLTemplate(schema, table_code):
"""Schemas and table names can't be handled as bindparams with SQLAlchemy, so
this allows us to use a 'select *' syntax for multiple tables.
"""
try:
blocks_vintage_table = BLOCK_VINTAGE_TABLES[schema]
except KeyError:
raise ValueError(f"No blocks vintage identified for given schema {schema}")
return SELECT_BY_USER_GEOGRAPHY_SQL_TEMPLATE.format(schema=schema, table_code=table_code, blocks_vintage_table=blocks_vintage_table)
def aggregate_decennial(db, hash_digest, release, table_code):
"""For the given user geography, identified by hash_digest, aggregate the given table
for the given decennial census release, and return a Pandas dataframe with the results.
In addition to the data columns for the given table, the dataframe may include columns
'name' and/or 'original_id', if the user geography identified sources for those in their
upload.
"""
if fetch_metadata(release=release, table_code=table_code):
sql = evaluateUserGeographySQLTemplate(release, table_code)
query = text(sql).bindparams(hash_digest=hash_digest)
logger.info(f'aggregate_decennial: starting timer {hash_digest} {release} {table_code}')
start = timer()
df = pd.read_sql(query, db.engine)
end = timer()
logger.info(f"pd.read_sql {hash_digest} {release} {table_code} elapsed time {timedelta(seconds=end-start)}")
df = df.drop('geoid',axis=1) # we don't care about the original blocks after we groupby
agg_funcs = dict((c,'sum') for c in df.columns[1:])
agg_funcs['name'] = 'first' # these string values are
agg_funcs['original_id'] = 'first' # the same for each row aggregated
agg_funcs['geom'] = 'first' # by 'user_geodata_geometry_id'
aggd = df.groupby('user_geodata_geometry_id').agg(agg_funcs)
for c in ['name', 'original_id']:
if aggd[c].isnull().all():
aggd = aggd.drop(c,axis=1)
aggd = aggd.reset_index()
end = timer()
logger.info(f"all processing {hash_digest} {release} {table_code} total elapsed time {timedelta(seconds=end-start)}")
return aggd
raise ValueError('Invalid release or table code')
def aggregate_decennial_comparison(db, hash_digest, table_code):
agg_2020 = aggregate_decennial(db, hash_digest, 'dec2020_pl94', table_code).set_index('user_geodata_geometry_id')
agg_2010 = aggregate_decennial(db, hash_digest, 'dec2010_pl94', table_code).set_index('user_geodata_geometry_id')
# not all uploads have all columns, so be responsive to the data
label_cols = []
for c in ['name', 'original_id', 'geom']:
if c in agg_2020:
label_cols.append(c)
label_df = agg_2020[label_cols]
agg_2020 = agg_2020.drop(label_cols,axis=1)
agg_2010 = agg_2010.drop(label_cols,axis=1)
pct_chg = (agg_2020-agg_2010)/agg_2010
joined = agg_2020.join(agg_2010,lsuffix='_2020',rsuffix='_2010')
joined = joined.join(pct_chg.rename(columns=lambda x: f"{x}_change"))
return label_df.join(joined).reset_index()
def dataframe_to_feature_collection(df: pd.DataFrame, geom_col):
"""Given a Pandas dataframe with one column stringified GeoJSON, return a
dict representing a GeoJSON FeatureCollection, where `geom_col` is parsed and
used for the 'geometry' and the rest of the row is converted to a 'properties' dict."""
geojson = {
"type": "FeatureCollection",
"features": []
}
for _, row in df.iterrows():
row = row.to_dict()
geom = row.pop(geom_col)
f = {
'type': 'Feature',
'geometry': json.loads(geom),
'properties': row
}
if 'original_id' in row:
f['id'] = row['original_id']
geojson['features'].append(f)
return geojson
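# Illustrative output shape (property names and values are hypothetical):
# {"type": "FeatureCollection",
#  "features": [
#    {"type": "Feature",
#     "geometry": {"type": "MultiPolygon", "coordinates": [...]},
#     "properties": {"name": "Downtown", "original_id": "NBHD-01", "pop100": 1234},
#     "id": "NBHD-01"}]}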
def create_block_xref_download(db, hash_digest, year):
try:
sql = USER_BLOCKS_BY_HASH_DIGEST_SQL[str(year)]
except KeyError:
raise ValueError(f"Invalid year {year}")
df = pd.read_sql(sql.bindparams(hash_digest=hash_digest),db.engine)
user_geo_name = str(df['upload_name'].unique().squeeze())
df = df.drop('upload_name', axis=1)
metadata = {
'title': f"Census Reporter {year} Block Assignments for {user_geo_name}",
'columns': OrderedDict((
            ('geoid', '15-character unique block identifier'),
('cr_geoid', '''An arbitrary unique identifier for a specific geography (e.g. neighborhood) included in a user uploaded map'''),
('name', 'A name for a specific geography included in a user uploaded map, if available'),
('original_id', 'A unique identifier for a specific geography included in a user uploaded map, from the original source, if available'),
('pop100', f'The total population for the given block (Decennial Census {year})'),
('hu100', f'The total housing units (occupied or vacant) for the given block (Decennial Census {year})'),
('state_place_fips', f'The combined State/Place FIPS code for the given block (Decennial Census {year})'),
))
}
release = f'tiger{year}'
table_code = 'block_assignments'
tmp = write_compound_zipfile(hash_digest, release, table_code, df, metadata)
remote_filename = build_filename(hash_digest, year, 'block_assignments', 'zip')
move_file_to_s3(tmp.name,hash_digest,remote_filename)
return tmp
def create_aggregate_download(db, hash_digest, release, table_code):
if release == COMPARISON_RELEASE_CODE:
aggregated = aggregate_decennial_comparison(db, hash_digest, table_code)
else:
aggregated = aggregate_decennial(db, hash_digest, release, table_code)
metadata = fetch_metadata(release=release, table_code=table_code)
    if 'original_id' in aggregated: # original ID is second if it's there, so insert it first
metadata['columns']['original_id'] = 'Geographic Identifier'
metadata['columns'].move_to_end('original_id', last=False)
    if 'name' in aggregated: # name is first if it's there
metadata['columns']['name'] = 'Geography Name'
metadata['columns'].move_to_end('name', last=False)
# only need it if there's no name or ID. will we even tolerate that?
if 'name' in aggregated or 'original_id' in aggregated:
aggregated = aggregated.drop('user_geodata_geometry_id', axis=1)
else:
aggregated = aggregated.rename(columns={'user_geodata_geometry_id': 'cr_geoid'})
metadata['columns']['cr_geoid'] = 'Census Reporter Geography ID'
metadata['columns'].move_to_end('cr_geoid', last=False)
# NaN and inf bork JSON and inf looks bad in CSV too.
# Any columns could have NaN, not just pct_chg -- e.g. Atlanta has n'hoods which get no 2010 blocks
aggregated = aggregated.replace([np.inf, -np.inf, np.nan],'')
tmp = write_compound_zipfile(hash_digest, release, table_code, aggregated, metadata)
remote_filename = build_filename(hash_digest, release, table_code, 'zip')
move_file_to_s3(tmp.name,hash_digest,remote_filename)
return tmp
def write_compound_zipfile(hash_digest, release, table_code, df, metadata):
"""Given a dataframe with a 'geom' column,
create a ZipFile with the data from that dataframe
in both CSV and GeoJSON, returning a semi-persistent
temporary file.
"""
with NamedTemporaryFile('wb',suffix='.zip',delete=False) as tmp:
with zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as zf:
zf.writestr(build_filename(hash_digest, release, table_code, 'csv'), df.drop('geom', axis=1).to_csv(index=False))
zf.writestr(build_filename(hash_digest, release, table_code, 'geojson'), json.dumps(dataframe_to_feature_collection(df, 'geom')))
            zf.writestr('metadata.json', json.dumps(metadata, indent=2))
zf.close()
return tmp
def move_file_to_s3(local_filename, hash_digest, destination_filename):
"""Considered making this a celery task, but don't think the file created on `web` is available on `worker`
    so let's wait to see if we even need the async.
"""
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(local_filename,
"files.censusreporter.org",
f"aggregation/{hash_digest}/{destination_filename}",
ExtraArgs={'ACL': 'public-read'})
except ClientError as e:
logger.error(e)
return False
return True
def build_filename(hash_digest, release, table_code, extension):
return f'{release}_{hash_digest}_{table_code}.{extension}'
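# For reference (illustrative values only): build_filename('1a2b3c', 'dec2020_pl94', 'p2', 'csv')
# returns 'dec2020_pl94_1a2b3c_p2.csv'; the same pattern names the zip members written in
# write_compound_zipfile and the S3 keys passed to move_file_to_s3.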
METADATA = {
'p1': {
'title': 'Race',
'releases': ['dec2010_pl94', 'dec2020_pl94'],
'columns': OrderedDict((
('P0010001', 'P1-1: Total'),
('P0010002', 'P1-2: Population of one race'),
('P0010003', 'P1-3: White alone'),
('P0010004', 'P1-4: Black or African American alone'),
('P0010005', 'P1-5: American Indian and Alaska Native alone'),
('P0010006', 'P1-6: Asian alone'),
('P0010007', 'P1-7: Native Hawaiian and Other Pacific Islander alone'),
('P0010008', 'P1-8: Some other race alone'),
('P0010009', 'P1-9: Population of two or more races'),
('P0010010', 'P1-10: Population of two races'),
('P0010011', 'P1-11: White; Black or African American'),
('P0010012', 'P1-12: White; American Indian and Alaska Native'),
('P0010013', 'P1-13: White; Asian'),
('P0010014', 'P1-14: White; Native Hawaiian and Other Pacific Islander'),
('P0010015', 'P1-15: White; Some other race'),
('P0010016', 'P1-16: Black or African American; American Indian and Alaska Native'),
('P0010017', 'P1-17: Black or African American; Asian'),
('P0010018', 'P1-18: Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0010019', 'P1-19: Black or African American; Some other race'),
('P0010020', 'P1-20: American Indian and Alaska Native; Asian'),
('P0010021', 'P1-21: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0010022', 'P1-22: American Indian and Alaska Native; Some other race'),
('P0010023', 'P1-23: Asian; Native Hawaiian and Other Pacific Islander'),
('P0010024', 'P1-24: Asian; Some other race'),
('P0010025', 'P1-25: Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010026', 'P1-26: Population of three races'),
('P0010027', 'P1-27: White; Black or African American; American Indian and Alaska Native'),
('P0010028', 'P1-28: White; Black or African American; Asian'),
('P0010029', 'P1-29: White; Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0010030', 'P1-30: White; Black or African American; Some other race'),
('P0010031', 'P1-31: White; American Indian and Alaska Native; Asian'),
('P0010032', 'P1-32: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0010033', 'P1-33: White; American Indian and Alaska Native; Some other race'),
('P0010034', 'P1-34: White; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010035', 'P1-35: White; Asian; Some other race'),
('P0010036', 'P1-36: White; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010037', 'P1-37: Black or African American; American Indian and Alaska Native; Asian'),
('P0010038', 'P1-38: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0010039', 'P1-39: Black or African American; American Indian and Alaska Native; Some other race'),
('P0010040', 'P1-40: Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010041', 'P1-41: Black or African American; Asian; Some other race'),
('P0010042', 'P1-42: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010043', 'P1-43: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010044', 'P1-44: American Indian and Alaska Native; Asian; Some other race'),
('P0010045', 'P1-45: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010046', 'P1-46: Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010047', 'P1-47: Population of four races'),
('P0010048', 'P1-48: White; Black or African American; American Indian and Alaska Native; Asian'),
('P0010049', 'P1-49: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0010050', 'P1-50: White; Black or African American; American Indian and Alaska Native; Some other race'),
('P0010051', 'P1-51: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010052', 'P1-52: White; Black or African American; Asian; Some other race'),
('P0010053', 'P1-53: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010054', 'P1-54: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010055', 'P1-55: White; American Indian and Alaska Native; Asian; Some other race'),
('P0010056', 'P1-56: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010057', 'P1-57: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010058', 'P1-58: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010059', 'P1-59: Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0010060', 'P1-60: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010061', 'P1-61: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010062', 'P1-62: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010063', 'P1-63: Population of five races'),
('P0010064', 'P1-64: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0010065', 'P1-65: White; Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0010066', 'P1-66: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010067', 'P1-67: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010068', 'P1-68: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010069', 'P1-69: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0010070', 'P1-70: Population of six races'),
('P0010071', 'P1-71: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))
},
'p2': {
'title': 'Hispanic or Latino, and not Hispanic or Latino by Race',
'releases': ['dec2010_pl94', 'dec2020_pl94'],
'columns': OrderedDict((
('P0020001', 'P2-1: Total'),
('P0020002', 'P2-2: Hispanic or Latino'),
('P0020003', 'P2-3: Not Hispanic or Latino'),
('P0020004', 'P2-4: Population of one race'),
('P0020005', 'P2-5: White alone'),
('P0020006', 'P2-6: Black or African American alone'),
('P0020007', 'P2-7: American Indian and Alaska Native alone'),
('P0020008', 'P2-8: Asian alone'),
('P0020009', 'P2-9: Native Hawaiian and Other Pacific Islander alone'),
('P0020010', 'P2-10: Some other race alone'),
('P0020011', 'P2-11: Population of two or more races'),
('P0020012', 'P2-12: Population of two races'),
('P0020013', 'P2-13: White; Black or African American'),
('P0020014', 'P2-14: White; American Indian and Alaska Native'),
('P0020015', 'P2-15: White; Asian'),
('P0020016', 'P2-16: White; Native Hawaiian and Other Pacific Islander'),
('P0020017', 'P2-17: White; Some other race'),
('P0020018', 'P2-18: Black or African American; American Indian and Alaska Native'),
('P0020019', 'P2-19: Black or African American; Asian'),
('P0020020', 'P2-20: Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0020021', 'P2-21: Black or African American; Some other race'),
('P0020022', 'P2-22: American Indian and Alaska Native; Asian'),
('P0020023', 'P2-23: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0020024', 'P2-24: American Indian and Alaska Native; Some other race'),
('P0020025', 'P2-25: Asian; Native Hawaiian and Other Pacific Islander'),
('P0020026', 'P2-26: Asian; Some other race'),
('P0020027', 'P2-27: Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020028', 'P2-28: Population of three races'),
('P0020029', 'P2-29: White; Black or African American; American Indian and Alaska Native'),
('P0020030', 'P2-30: White; Black or African American; Asian'),
('P0020031', 'P2-31: White; Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0020032', 'P2-32: White; Black or African American; Some other race'),
('P0020033', 'P2-33: White; American Indian and Alaska Native; Asian'),
('P0020034', 'P2-34: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0020035', 'P2-35: White; American Indian and Alaska Native; Some other race'),
('P0020036', 'P2-36: White; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020037', 'P2-37: White; Asian; Some other race'),
('P0020038', 'P2-38: White; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020039', 'P2-39: Black or African American; American Indian and Alaska Native; Asian'),
('P0020040', 'P2-40: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0020041', 'P2-41: Black or African American; American Indian and Alaska Native; Some other race'),
('P0020042', 'P2-42: Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020043', 'P2-43: Black or African American; Asian; Some other race'),
('P0020044', 'P2-44: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020045', 'P2-45: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020046', 'P2-46: American Indian and Alaska Native; Asian; Some other race'),
('P0020047', 'P2-47: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020048', 'P2-48: Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020049', 'P2-49: Population of four races'),
('P0020050', 'P2-50: White; Black or African American; American Indian and Alaska Native; Asian'),
('P0020051', 'P2-51: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0020052', 'P2-52: White; Black or African American; American Indian and Alaska Native; Some other race'),
('P0020053', 'P2-53: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020054', 'P2-54: White; Black or African American; Asian; Some other race'),
('P0020055', 'P2-55: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020056', 'P2-56: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020057', 'P2-57: White; American Indian and Alaska Native; Asian; Some other race'),
('P0020058', 'P2-58: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020059', 'P2-59: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020060', 'P2-60: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020061', 'P2-61: Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0020062', 'P2-62: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020063', 'P2-63: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020064', 'P2-64: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020065', 'P2-65: Population of five races'),
('P0020066', 'P2-66: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0020067', 'P2-67: White; Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0020068', 'P2-68: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020069', 'P2-69: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020070', 'P2-70: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020071', 'P2-71: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0020072', 'P2-72: Population of six races'),
('P0020073', 'P2-73: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))
},
'p3': {
'title': 'Race for the Population 18 Years and Over',
'releases': ['dec2010_pl94', 'dec2020_pl94'],
'columns': OrderedDict((
('P0030001', 'P3-1: Total'),
('P0030002', 'P3-2: Population of one race'),
('P0030003', 'P3-3: White alone'),
('P0030004', 'P3-4: Black or African American alone'),
('P0030005', 'P3-5: American Indian and Alaska Native alone'),
('P0030006', 'P3-6: Asian alone'),
('P0030007', 'P3-7: Native Hawaiian and Other Pacific Islander alone'),
('P0030008', 'P3-8: Some other race alone'),
('P0030009', 'P3-9: Population of two or more races'),
('P0030010', 'P3-10: Population of two races'),
('P0030011', 'P3-11: White; Black or African American'),
('P0030012', 'P3-12: White; American Indian and Alaska Native'),
('P0030013', 'P3-13: White; Asian'),
('P0030014', 'P3-14: White; Native Hawaiian and Other Pacific Islander'),
('P0030015', 'P3-15: White; Some other race'),
('P0030016', 'P3-16: Black or African American; American Indian and Alaska Native'),
('P0030017', 'P3-17: Black or African American; Asian'),
('P0030018', 'P3-18: Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0030019', 'P3-19: Black or African American; Some other race'),
('P0030020', 'P3-20: American Indian and Alaska Native; Asian'),
('P0030021', 'P3-21: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0030022', 'P3-22: American Indian and Alaska Native; Some other race'),
('P0030023', 'P3-23: Asian; Native Hawaiian and Other Pacific Islander'),
('P0030024', 'P3-24: Asian; Some other race'),
('P0030025', 'P3-25: Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030026', 'P3-26: Population of three races'),
('P0030027', 'P3-27: White; Black or African American; American Indian and Alaska Native'),
('P0030028', 'P3-28: White; Black or African American; Asian'),
('P0030029', 'P3-29: White; Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0030030', 'P3-30: White; Black or African American; Some other race'),
('P0030031', 'P3-31: White; American Indian and Alaska Native; Asian'),
('P0030032', 'P3-32: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0030033', 'P3-33: White; American Indian and Alaska Native; Some other race'),
('P0030034', 'P3-34: White; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030035', 'P3-35: White; Asian; Some other race'),
('P0030036', 'P3-36: White; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030037', 'P3-37: Black or African American; American Indian and Alaska Native; Asian'),
('P0030038', 'P3-38: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0030039', 'P3-39: Black or African American; American Indian and Alaska Native; Some other race'),
('P0030040', 'P3-40: Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030041', 'P3-41: Black or African American; Asian; Some other race'),
('P0030042', 'P3-42: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030043', 'P3-43: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030044', 'P3-44: American Indian and Alaska Native; Asian; Some other race'),
('P0030045', 'P3-45: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030046', 'P3-46: Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030047', 'P3-47: Population of four races'),
('P0030048', 'P3-48: White; Black or African American; American Indian and Alaska Native; Asian'),
('P0030049', 'P3-49: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0030050', 'P3-50: White; Black or African American; American Indian and Alaska Native; Some other race'),
('P0030051', 'P3-51: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030052', 'P3-52: White; Black or African American; Asian; Some other race'),
('P0030053', 'P3-53: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030054', 'P3-54: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030055', 'P3-55: White; American Indian and Alaska Native; Asian; Some other race'),
('P0030056', 'P3-56: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030057', 'P3-57: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030058', 'P3-58: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030059', 'P3-59: Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0030060', 'P3-60: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030061', 'P3-61: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030062', 'P3-62: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030063', 'P3-63: Population of five races'),
('P0030064', 'P3-64: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0030065', 'P3-65: White; Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0030066', 'P3-66: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030067', 'P3-67: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030068', 'P3-68: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030069', 'P3-69: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0030070', 'P3-70: Population of six races'),
('P0030071', 'P3-71: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))
},
'p4': {
'title': 'Hispanic or Latino, and not Hispanic or Latino by Race for the Population 18 Years and Over',
'releases': ['dec2010_pl94', 'dec2020_pl94'],
'columns': OrderedDict((
('P0040001', 'P4-1: Total'),
('P0040002', 'P4-2: Hispanic or Latino'),
('P0040003', 'P4-3: Not Hispanic or Latino'),
('P0040004', 'P4-4: Population of one race'),
('P0040005', 'P4-5: White alone'),
('P0040006', 'P4-6: Black or African American alone'),
('P0040007', 'P4-7: American Indian and Alaska Native alone'),
('P0040008', 'P4-8: Asian alone'),
('P0040009', 'P4-9: Native Hawaiian and Other Pacific Islander alone'),
('P0040010', 'P4-10: Some other race alone'),
('P0040011', 'P4-11: Population of two or more races'),
('P0040012', 'P4-12: Population of two races'),
('P0040013', 'P4-13: White; Black or African American'),
('P0040014', 'P4-14: White; American Indian and Alaska Native'),
('P0040015', 'P4-15: White; Asian'),
('P0040016', 'P4-16: White; Native Hawaiian and Other Pacific Islander'),
('P0040017', 'P4-17: White; Some other race'),
('P0040018', 'P4-18: Black or African American; American Indian and Alaska Native'),
('P0040019', 'P4-19: Black or African American; Asian'),
('P0040020', 'P4-20: Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0040021', 'P4-21: Black or African American; Some other race'),
('P0040022', 'P4-22: American Indian and Alaska Native; Asian'),
('P0040023', 'P4-23: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0040024', 'P4-24: American Indian and Alaska Native; Some other race'),
('P0040025', 'P4-25: Asian; Native Hawaiian and Other Pacific Islander'),
('P0040026', 'P4-26: Asian; Some other race'),
('P0040027', 'P4-27: Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040028', 'P4-28: Population of three races'),
('P0040029', 'P4-29: White; Black or African American; American Indian and Alaska Native'),
('P0040030', 'P4-30: White; Black or African American; Asian'),
('P0040031', 'P4-31: White; Black or African American; Native Hawaiian and Other Pacific Islander'),
('P0040032', 'P4-32: White; Black or African American; Some other race'),
('P0040033', 'P4-33: White; American Indian and Alaska Native; Asian'),
('P0040034', 'P4-34: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0040035', 'P4-35: White; American Indian and Alaska Native; Some other race'),
('P0040036', 'P4-36: White; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040037', 'P4-37: White; Asian; Some other race'),
('P0040038', 'P4-38: White; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040039', 'P4-39: Black or African American; American Indian and Alaska Native; Asian'),
('P0040040', 'P4-40: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0040041', 'P4-41: Black or African American; American Indian and Alaska Native; Some other race'),
('P0040042', 'P4-42: Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040043', 'P4-43: Black or African American; Asian; Some other race'),
('P0040044', 'P4-44: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040045', 'P4-45: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040046', 'P4-46: American Indian and Alaska Native; Asian; Some other race'),
('P0040047', 'P4-47: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040048', 'P4-48: Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040049', 'P4-49: Population of four races'),
('P0040050', 'P4-50: White; Black or African American; American Indian and Alaska Native; Asian'),
('P0040051', 'P4-51: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander'),
('P0040052', 'P4-52: White; Black or African American; American Indian and Alaska Native; Some other race'),
('P0040053', 'P4-53: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040054', 'P4-54: White; Black or African American; Asian; Some other race'),
('P0040055', 'P4-55: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040056', 'P4-56: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040057', 'P4-57: White; American Indian and Alaska Native; Asian; Some other race'),
('P0040058', 'P4-58: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040059', 'P4-59: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040060', 'P4-60: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040061', 'P4-61: Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0040062', 'P4-62: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040063', 'P4-63: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040064', 'P4-64: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040065', 'P4-65: Population of five races'),
('P0040066', 'P4-66: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander'),
('P0040067', 'P4-67: White; Black or African American; American Indian and Alaska Native; Asian; Some other race'),
('P0040068', 'P4-68: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040069', 'P4-69: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040070', 'P4-70: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040071', 'P4-71: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'),
('P0040072', 'P4-72: Population of six races'),
('P0040073', 'P4-73: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'))),
},
'p5': {
'title': 'Group Quarters Population by Major Group Quarters Type',
'releases': ['dec2020_pl94'],
'columns': OrderedDict((
('P0050001', 'Total:'),
('P0050002', 'Institutionalized population:'),
('P0050003', 'Correctional facilities for adults'),
('P0050004', 'Juvenile facilities'),
('P0050005', 'Nursing facilities/Skilled-nursing facilities'),
('P0050006', 'Other institutional facilities'),
('P0050007', 'Noninstitutionalized population:'),
('P0050008', 'College/University student housing'),
('P0050009', 'Military quarters'),
('P0050010', 'Other noninstitutional facilities'),
))
},
'h1': {
'title': 'Occupancy Status',
'releases': ['dec2010_pl94', 'dec2020_pl94'],
'columns': OrderedDict((
('H0010001', 'H1-1: Total'),
('H0010002', 'H1-2: Occupied'),
('H0010003', 'H1-3: Vacant'))),
}
}
|
mccolors/getcolors.py | wangtt03/raspberryjammod | 338 | 12713179 |
from PIL import Image
from os import listdir
def averageColor(filename):
image = Image.open(filename).convert('RGB')
r,g,b = 0.,0.,0.
pixels = image.size[0] * image.size[1]
for x in range(image.size[0]):
for y in range(image.size[1]):
rgb = image.getpixel((x,y))
r += rgb[0]
g += rgb[1]
b += rgb[2]
image.close()
return int(round(r/pixels)), int(round(g/pixels)), int(round(b/pixels))
print("colorDictionary={")
for f in listdir('assets/minecraft/textures/blocks'):
if f.lower().endswith(".png"):
print(" '"+f[:-4]+"': "+str(averageColor('assets/minecraft/textures/blocks/'+f))+",")
print("}");
|
Pyto/Samples/Matplotlib/polar_demo.py | snazari/Pyto | 701 | 12713182 |
"""
==========
Polar Demo
==========
Demo of a line plot on a polar axis.
"""
import numpy as np
import matplotlib.pyplot as plt
r = np.arange(0, 2, 0.01)
theta = 2 * np.pi * r
ax = plt.subplot(111, projection='polar')
ax.plot(theta, r)
ax.set_rmax(2)
ax.set_rticks([0.5, 1, 1.5, 2]) # Less radial ticks
ax.set_rlabel_position(-22.5) # Move radial labels away from plotted line
ax.grid(True)
ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
import matplotlib
matplotlib.axes.Axes.plot
matplotlib.projections.polar
matplotlib.projections.polar.PolarAxes
matplotlib.projections.polar.PolarAxes.set_rticks
matplotlib.projections.polar.PolarAxes.set_rmax
matplotlib.projections.polar.PolarAxes.set_rlabel_position
|
src/oci/key_management/models/vault_usage.py | Manny27nyc/oci-python-sdk | 249 | 12713194 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class VaultUsage(object):
"""
VaultUsage model.
"""
def __init__(self, **kwargs):
"""
Initializes a new VaultUsage object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key_count:
The value to assign to the key_count property of this VaultUsage.
:type key_count: int
:param key_version_count:
The value to assign to the key_version_count property of this VaultUsage.
:type key_version_count: int
:param software_key_count:
The value to assign to the software_key_count property of this VaultUsage.
:type software_key_count: int
:param software_key_version_count:
The value to assign to the software_key_version_count property of this VaultUsage.
:type software_key_version_count: int
"""
self.swagger_types = {
'key_count': 'int',
'key_version_count': 'int',
'software_key_count': 'int',
'software_key_version_count': 'int'
}
self.attribute_map = {
'key_count': 'keyCount',
'key_version_count': 'keyVersionCount',
'software_key_count': 'softwareKeyCount',
'software_key_version_count': 'softwareKeyVersionCount'
}
self._key_count = None
self._key_version_count = None
self._software_key_count = None
self._software_key_version_count = None
@property
def key_count(self):
"""
**[Required]** Gets the key_count of this VaultUsage.
The number of keys in this vault that persist on a hardware security module (HSM), across all compartments, excluding keys in a `DELETED` state.
:return: The key_count of this VaultUsage.
:rtype: int
"""
return self._key_count
@key_count.setter
def key_count(self, key_count):
"""
Sets the key_count of this VaultUsage.
The number of keys in this vault that persist on a hardware security module (HSM), across all compartments, excluding keys in a `DELETED` state.
:param key_count: The key_count of this VaultUsage.
:type: int
"""
self._key_count = key_count
@property
def key_version_count(self):
"""
**[Required]** Gets the key_version_count of this VaultUsage.
The number of key versions in this vault that persist on a hardware security module (HSM), across all compartments, excluding key versions in a `DELETED` state.
:return: The key_version_count of this VaultUsage.
:rtype: int
"""
return self._key_version_count
@key_version_count.setter
def key_version_count(self, key_version_count):
"""
Sets the key_version_count of this VaultUsage.
The number of key versions in this vault that persist on a hardware security module (HSM), across all compartments, excluding key versions in a `DELETED` state.
:param key_version_count: The key_version_count of this VaultUsage.
:type: int
"""
self._key_version_count = key_version_count
@property
def software_key_count(self):
"""
Gets the software_key_count of this VaultUsage.
The number of keys in this vault that persist on the server, across all compartments, excluding keys in a `DELETED` state.
:return: The software_key_count of this VaultUsage.
:rtype: int
"""
return self._software_key_count
@software_key_count.setter
def software_key_count(self, software_key_count):
"""
Sets the software_key_count of this VaultUsage.
The number of keys in this vault that persist on the server, across all compartments, excluding keys in a `DELETED` state.
:param software_key_count: The software_key_count of this VaultUsage.
:type: int
"""
self._software_key_count = software_key_count
@property
def software_key_version_count(self):
"""
Gets the software_key_version_count of this VaultUsage.
The number of key versions in this vault that persist on the server, across all compartments, excluding key versions in a `DELETED` state.
:return: The software_key_version_count of this VaultUsage.
:rtype: int
"""
return self._software_key_version_count
@software_key_version_count.setter
def software_key_version_count(self, software_key_version_count):
"""
Sets the software_key_version_count of this VaultUsage.
The number of key versions in this vault that persist on the server, across all compartments, excluding key versions in a `DELETED` state.
:param software_key_version_count: The software_key_version_count of this VaultUsage.
:type: int
"""
self._software_key_version_count = software_key_version_count
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
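# Hedged usage sketch (not part of the generated model). In the service API these values
# are usually obtained through the vault client's usage call (e.g.
# KmsVaultClient.get_vault_usage(vault_id).data -- assumed here, check your SDK version),
# but the model can also be populated directly from keyword arguments, as the __init__
# docstring describes.
def _example_vault_usage():
    usage = VaultUsage(key_count=3, key_version_count=7,
                       software_key_count=1, software_key_version_count=2)
    # The formatted_flat_dict-backed __repr__ makes the populated fields easy to inspect.
    return usage.key_count, repr(usage)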
|
tests/integration-v1/cattletest/core/test_ha_config.py | lifecontrol/cattle | 482 | 12713205 | from common import * # NOQA
import json
@pytest.mark.nonparallel
def test_ha_config(admin_user_client):
ha_config = find_one(admin_user_client.list_ha_config)
admin_user_client.update(ha_config, enabled=False)
ha_config = find_one(admin_user_client.list_ha_config)
assert not ha_config.enabled
admin_user_client.update(ha_config, enabled=True)
ha_config = find_one(admin_user_client.list_ha_config)
assert ha_config.enabled
admin_user_client.update(ha_config, enabled=False)
ha_config = find_one(admin_user_client.list_ha_config)
assert not ha_config.enabled
assert ha_config.dbHost in ['localhost', '127.0.0.1']
assert ha_config.dbSize > 0
def test_ha_config_script(admin_user_client):
ha_config = find_one(admin_user_client.list_ha_config)
create_url = ha_config.actions['createscript']
r = requests.post(create_url, data=json.dumps({
'clusterSize': 5,
'httpPort': 1234,
'httpsPort': 1235,
'redisPort': 6375,
'zookeeperQuorumPort': 6375,
'zookeeperLeaderPort': 6375,
'zookeeperClientPort': 6375,
'cert': 'cert',
'certChain': 'certChain',
'key': 'key',
'hostRegistrationUrl': 'https://....',
'swarmEnabled': False,
'httpEnabled': False,
}))
assert r.text is not None
assert r.status_code == 200
def check():
ha_config = find_one(admin_user_client.list_ha_config)
return ha_config.clusterSize == 5
wait_for(check)
@pytest.mark.nonparallel
def test_ha_config_dbdump(admin_user_client):
ha_config = find_one(admin_user_client.list_ha_config)
dump = ha_config.links['dbdump']
r = requests.get(dump)
assert r.text is not None
assert r.status_code == 200
|
ci/docker/docker-in-docker-image/_conftest.py | bugtsa/avito-android | 347 | 12713273 | import pytest
import testinfra
check_output = testinfra.get_host(
'local://'
).check_output
class CommandLineArguments:
def __init__(self, docker_image):
self.docker_image = docker_image
@pytest.fixture()
def host(request):
arguments = _parse_command_line_arguments(request)
image_id = arguments.docker_image or check_output('docker build -q %s', request.param)
container_id = check_output(
'docker run -d --entrypoint tail %s -f /dev/null', image_id
)
def teardown():
check_output('docker rm -f %s', container_id)
request.addfinalizer(teardown)
return testinfra.get_host('docker://' + container_id)
def _parse_command_line_arguments(request):
option_docker_image = request.config.getoption('--docker-image')
return CommandLineArguments(
docker_image=option_docker_image
)
def pytest_addoption(parser):
parser.addoption(
'--docker-image',
action='store',
type='string',
        help='Pre-built docker image to test; if omitted, the image is built from the path given in the docker marker',
required=False
)
def pytest_generate_tests(metafunc):
if 'host' in metafunc.fixturenames:
marker = metafunc.definition.get_closest_marker('docker')
if marker is None:
raise Exception('docker marker is required for infrastructure tests')
path = marker.kwargs.get('path')
if path is None:
path = '.'
metafunc.parametrize(
'host',
[path],
indirect=True,
scope='module'
)
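# Hedged illustration (not from the repository; in practice a test like this would live
# in a sibling test module rather than in conftest.py). The test marks itself with the
# custom `docker` marker so pytest_generate_tests above parametrizes the `host` fixture,
# which yields a testinfra host attached to a container built from the marked Dockerfile
# path (the path below is an assumption).
@pytest.mark.docker(path='ci/docker/docker-in-docker-image')
def test_docker_client_installed(host):
    # testinfra's host.exists checks that the named binary is on PATH in the container
    assert host.exists('docker')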
|
tests/kibana_test.py | perceptron01/elastalert2 | 250 | 12713282 |
import copy
import json
from elastalert.kibana import add_filter
from elastalert.kibana import dashboard_temp
from elastalert.kibana import filters_from_dashboard
from elastalert.kibana import kibana4_dashboard_link
from elastalert.util import EAException
# Dashboard schema with only filters section
test_dashboard = '''{
"title": "AD Lock Outs",
"services": {
"filter": {
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-7d",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "field",
"field": "_log_type",
"query": "\\"active_directory\\"",
"mandate": "must",
"active": true,
"alias": "",
"id": 1
},
"2": {
"type": "querystring",
"query": "ad.security_auditing_code:4740",
"mandate": "must",
"active": true,
"alias": "",
"id": 2
},
"3": {
"type": "range",
"field": "@timestamp",
"mandate": "must",
"active": true,
"alias": "",
"from": "2014-09-27T12:34:45Z",
"to": "2014-09-26T12:34:45Z",
"id": 3
},
"4": {
"field": "@timestamp",
"alias": "",
"mandate": "mustNot",
"active": true,
"query": "that",
"type": "field",
"id": 4
},
"5": {
"field": "@timestamp",
"alias": "",
"mandate": "either",
"active": true,
"query": "that",
"type": "field",
"id": 5
}
},
"ids": [
0,
1,
2,
3,
4,
5
]
}
}
}'''
test_dashboard = json.loads(test_dashboard)
test_dashboard2 = '''{
"title": "AD Lock Outs",
"services": {
"filter": {
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-7d",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "field",
"field": "_log_type",
"query": "\\"active_directory\\"",
"mandate": "must",
"active": true,
"alias": "",
"id": 1
}
},
"ids": [
0,
1
]
}
}
}'''
test_dashboard2 = json.loads(test_dashboard2)
def test_filters_from_dashboard():
filters = filters_from_dashboard(test_dashboard)
assert {'term': {'_log_type': '"active_directory"'}} in filters
assert {'query': {'query_string': {'query': 'ad.security_auditing_code:4740'}}} in filters
assert {'range': {'@timestamp': {'from': '2014-09-27T12:34:45Z', 'to': '2014-09-26T12:34:45Z'}}} in filters
assert {'not': {'term': {'@timestamp': 'that'}}} in filters
assert {'or': [{'term': {'@timestamp': 'that'}}]} in filters
def test_filters_from_dashboard2():
filters = filters_from_dashboard(test_dashboard2)
assert {'term': {'_log_type': '"active_directory"'}} in filters
def test_add_filter():
basic_filter = {"term": {"this": "that"}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, basic_filter)
assert db['services']['filter']['list']['1'] == {
'field': 'this',
'alias': '',
'mandate': 'must',
'active': True,
'query': '"that"',
'type': 'field',
'id': 1
}
list_filter = {"term": {"this": ["that", "those"]}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, list_filter)
assert db['services']['filter']['list']['1'] == {
'field': 'this',
'alias': '',
'mandate': 'must',
'active': True,
'query': '("that" AND "those")',
'type': 'field',
'id': 1
}
not_filter = {'not': {'term': {'this': 'that'}}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, not_filter)
assert db['services']['filter']['list']['1'] == {
'field': 'this',
'alias': '',
'mandate': 'mustNot',
'active': True,
'query': '"that"',
'type': 'field',
'id': 1
}
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
range_filter = {'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, range_filter)
assert db['services']['filter']['list']['1'] == {
'field': '@timestamp',
'alias': '',
'mandate': 'must',
'active': True,
'lte': '2014-09-27T12:34:45Z',
'gt': '2014-09-26T12:34:45Z',
'type': 'range',
'id': 1
}
query_filter = {'query': {'wildcard': 'this*that'}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, query_filter)
assert db['services']['filter']['list']['1'] == {
'alias': '',
'mandate': 'must',
'active': True,
'id': 1
}
query_string_filter = {'query': {'query_string': {'query': 'ad.security_auditing_code:4740'}}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, query_string_filter)
assert db['services']['filter']['list']['1'] == {
'alias': '',
'mandate': 'must',
'active': True,
'query': 'ad.security_auditing_code:4740',
'type': 'querystring',
'id': 1
}
try:
error_filter = {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}
db = copy.deepcopy(dashboard_temp)
add_filter(db, error_filter)
except EAException as ea:
excepted = "Could not parse filter {'bool': {'must': [{'range': {'@timestamp': "
excepted += "{'lte': '2014-09-27T12:34:45Z', 'gt': '2014-09-26T12:34:45Z'}}}]}} for Kibana"
assert excepted == str(ea)
def test_url_encoded():
url = kibana4_dashboard_link('example.com/#/Dashboard', '2015-01-01T00:00:00Z', '2017-01-01T00:00:00Z')
assert not any([special_char in url for special_char in ["',\":;?&=()"]])
def test_url_env_substitution(environ):
environ.update({
'KIBANA_HOST': 'kibana',
'KIBANA_PORT': '5601',
})
url = kibana4_dashboard_link(
'http://$KIBANA_HOST:$KIBANA_PORT/#/Dashboard',
'2015-01-01T00:00:00Z',
'2017-01-01T00:00:00Z',
)
assert url.startswith('http://kibana:5601/#/Dashboard')
|
src/tests/test_log.py | cclauss/happymac | 244 | 12713340 | from collections import defaultdict
import datetime
import log
from mock import patch
import os
import os.path
import preferences
import process
import psutil
#
# TODO: Fix tests, needs work on Auger's automatic test generator
#
from psutil import Popen
import sys
import unittest
import utils
import versions.v00001.process
import versions.v00001.suspender
from versions.v00001.suspender import defaultdict
import versions.v00001.utils
from versions.v00001.utils import OnMainThread
class LogTest(unittest.TestCase):
@patch.object(os.path, 'join')
@patch.object(os.path, 'exists')
def test_get_log_path(self, mock_exists, mock_join):
mock_exists.return_value = True
mock_join.return_value = '/Users/chris/HappyMacApp/downloads/v00001'
self.assertEqual(
log.get_log_path(),
'/Users/chris/HappyMacApp/happymac_log.txt'
)
def test_log(self):
self.assertEqual(
log.log(message='Google process 44784 ()',error=None),
None
)
if __name__ == "__main__":
unittest.main()
|